From: Treehugger Robot Date: Thu, 19 May 2016 12:19:53 +0000 (+0000) Subject: Merge "Allow libcore and JDWP tests to be executed without JIT." X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=6c313600e995ac81a6e68e84b64761a10fb1311c;hp=5db109bc8030858f2830f4217333768c3c77095f;p=android-x86%2Fart.git Merge "Allow libcore and JDWP tests to be executed without JIT." --- diff --git a/Android.mk b/Android.mk index 25796a017..bb1334a05 100644 --- a/Android.mk +++ b/Android.mk @@ -93,6 +93,7 @@ include $(art_path)/tools/ahat/Android.mk include $(art_path)/tools/dexfuzz/Android.mk include $(art_path)/tools/dmtracedump/Android.mk include $(art_path)/sigchainlib/Android.mk +include $(art_path)/libart_fake/Android.mk # ART_HOST_DEPENDENCIES depends on Android.executable.mk above for ART_HOST_EXECUTABLES diff --git a/benchmark/jni-perf/src/JniPerfBenchmark.java b/benchmark/jni-perf/src/JniPerfBenchmark.java index b1b21ce0b..1e7cc2bf4 100644 --- a/benchmark/jni-perf/src/JniPerfBenchmark.java +++ b/benchmark/jni-perf/src/JniPerfBenchmark.java @@ -14,9 +14,7 @@ * limitations under the License. */ -import com.google.caliper.SimpleBenchmark; - -public class JniPerfBenchmark extends SimpleBenchmark { +public class JniPerfBenchmark { private static final String MSG = "ABCDE"; native void perfJniEmptyCall(); diff --git a/benchmark/jobject-benchmark/src/JObjectBenchmark.java b/benchmark/jobject-benchmark/src/JObjectBenchmark.java index f4c059c58..90a53b399 100644 --- a/benchmark/jobject-benchmark/src/JObjectBenchmark.java +++ b/benchmark/jobject-benchmark/src/JObjectBenchmark.java @@ -14,9 +14,7 @@ * limitations under the License. */ -import com.google.caliper.SimpleBenchmark; - -public class JObjectBenchmark extends SimpleBenchmark { +public class JObjectBenchmark { public JObjectBenchmark() { // Make sure to link methods before benchmark starts. 
System.loadLibrary("artbenchmark"); diff --git a/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java index be276fe48..0ad9c3695 100644 --- a/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java +++ b/benchmark/scoped-primitive-array/src/ScopedPrimitiveArrayBenchmark.java @@ -14,9 +14,7 @@ * limitations under the License. */ -import com.google.caliper.SimpleBenchmark; - -public class ScopedPrimitiveArrayBenchmark extends SimpleBenchmark { +public class ScopedPrimitiveArrayBenchmark { // Measure adds the first and last element of the array by using ScopedPrimitiveArray. static native long measureByteArray(int reps, byte[] arr); static native long measureShortArray(int reps, short[] arr); diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk index dde3cdb1d..0235a308f 100644 --- a/build/Android.common_build.mk +++ b/build/Android.common_build.mk @@ -363,11 +363,20 @@ ART_HOST_ASFLAGS += $(art_asflags) ifndef LIBART_IMG_TARGET_BASE_ADDRESS $(error LIBART_IMG_TARGET_BASE_ADDRESS unset) endif + +ART_TARGET_CFLAGS += $(art_cflags) -DART_TARGET \ + -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS) \ + +ifeq ($(ART_TARGET_LINUX),true) +# Setting ART_TARGET_LINUX to true compiles art/ assuming that the target device +# will be running linux rather than android. +ART_TARGET_CFLAGS += -DART_TARGET_LINUX +else # The ART_TARGET_ANDROID macro is passed to target builds, which check # against it instead of against __ANDROID__ (which is provided by target # toolchains). 
-ART_TARGET_CFLAGS += $(art_cflags) -DART_TARGET -DART_TARGET_ANDROID \ - -DART_BASE_ADDRESS=$(LIBART_IMG_TARGET_BASE_ADDRESS) \ +ART_TARGET_CFLAGS += -DART_TARGET_ANDROID +endif ART_TARGET_CFLAGS += $(art_target_cflags) ART_TARGET_ASFLAGS += $(art_asflags) diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index cd463ecc7..3b459c3ad 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -650,11 +650,11 @@ endef # define-art-gtest ifeq ($(ART_BUILD_TARGET),true) $(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),,libbacktrace))) - $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace))) + $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader))) endif ifeq ($(ART_BUILD_HOST),true) $(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),,libbacktrace))) - $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler libbacktrace))) + $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader))) endif # Used outside the art project to get a list of the current tests diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc index 81b854e9c..7c53e01c4 100644 --- a/cmdline/cmdline_parser_test.cc +++ b/cmdline/cmdline_parser_test.cc @@ -461,8 +461,8 @@ TEST_F(CmdlineParserTest, TestJitOptions) { * Test successes */ { - EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJIT); - EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJIT); + EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJitCompilation); + EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJitCompilation); } { 
EXPECT_SINGLE_PARSE_VALUE( diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h index c0a00cce7..4797540c3 100644 --- a/cmdline/cmdline_types.h +++ b/cmdline/cmdline_types.h @@ -620,6 +620,8 @@ struct CmdlineType : CmdlineTypeParser { log_verbosity.verifier = true; } else if (verbose_options[j] == "image") { log_verbosity.image = true; + } else if (verbose_options[j] == "systrace-locks") { + log_verbosity.systrace_lock_logging = true; } else { return Result::Usage(std::string("Unknown -verbose option ") + verbose_options[j]); } diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h index 230cb9aee..f8b746093 100644 --- a/compiler/cfi_test.h +++ b/compiler/cfi_test.h @@ -55,7 +55,9 @@ class CFITest : public dwarf::DwarfTest { kCFIFormat, 0, &debug_frame_data_, &debug_frame_patches); ReformatCfi(Objdump(false, "-W"), &lines); // Pretty-print assembly. - auto* opts = new DisassemblerOptions(false, actual_asm.data(), true); + const uint8_t* asm_base = actual_asm.data(); + const uint8_t* asm_end = asm_base + actual_asm.size(); + auto* opts = new DisassemblerOptions(false, asm_base, asm_end, true); std::unique_ptr disasm(Disassembler::Create(isa, opts)); std::stringstream stream; const uint8_t* base = actual_asm.data() + (isa == kThumb2 ? 
1 : 0); diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc index f75a252df..bf29e1c31 100644 --- a/compiler/common_compiler_test.cc +++ b/compiler/common_compiler_test.cc @@ -180,6 +180,7 @@ void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, isa, instruction_set_features_.get(), /* boot_image */ true, + /* app_image */ false, GetImageClasses(), GetCompiledClasses(), GetCompiledMethods(), diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc index 1491a183f..606302bd7 100644 --- a/compiler/dex/verification_results.cc +++ b/compiler/dex/verification_results.cc @@ -60,7 +60,7 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method // TODO: Investigate why are we doing the work again for this method and try to avoid it. LOG(WARNING) << "Method processed more than once: " << PrettyMethod(ref.dex_method_index, *ref.dex_file); - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size()); DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size()); } diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc index 5c0253c29..bace01471 100644 --- a/compiler/dex/verified_method.cc +++ b/compiler/dex/verified_method.cc @@ -54,7 +54,8 @@ const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_ve } // Only need dequicken info for JIT so far. 
- if (Runtime::Current()->UseJit() && !verified_method->GenerateDequickenMap(method_verifier)) { + if (Runtime::Current()->UseJitCompilation() && + !verified_method->GenerateDequickenMap(method_verifier)) { return nullptr; } } @@ -72,7 +73,7 @@ const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const { } const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const { - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); auto it = dequicken_map_.find(dex_pc); return (it != dequicken_map_.end()) ? &it->second : nullptr; } diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc index 9e0c22c68..6863f42d1 100644 --- a/compiler/driver/compiled_method_storage_test.cc +++ b/compiler/driver/compiled_method_storage_test.cc @@ -36,6 +36,7 @@ TEST(CompiledMethodStorage, Deduplicate) { /* instruction_set_ */ kNone, /* instruction_set_features */ nullptr, /* boot_image */ false, + /* app_image */ false, /* image_classes */ nullptr, /* compiled_classes */ nullptr, /* compiled_methods */ nullptr, diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index be82956e7..1ab1d31f0 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -341,6 +341,7 @@ CompilerDriver::CompilerDriver( InstructionSet instruction_set, const InstructionSetFeatures* instruction_set_features, bool boot_image, + bool app_image, std::unordered_set* image_classes, std::unordered_set* compiled_classes, std::unordered_set* compiled_methods, @@ -363,6 +364,7 @@ CompilerDriver::CompilerDriver( compiled_methods_(MethodTable::key_compare()), non_relative_linker_patch_count_(0u), boot_image_(boot_image), + app_image_(app_image), image_classes_(image_classes), classes_to_compile_(compiled_classes), methods_to_compile_(compiled_methods), @@ -473,7 +475,7 @@ static optimizer::DexToDexCompilationLevel 
GetDexToDexCompilationLevel( const DexFile& dex_file, const DexFile::ClassDef& class_def) SHARED_REQUIRES(Locks::mutator_lock_) { auto* const runtime = Runtime::Current(); - if (runtime->UseJit() || driver.GetCompilerOptions().VerifyAtRuntime()) { + if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) { // Verify at runtime shouldn't dex to dex since we didn't resolve of verify. return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile; } @@ -945,7 +947,7 @@ bool CompilerDriver::ShouldVerifyClassBasedOnProfile(const DexFile& dex_file, class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor { public: - ResolveCatchBlockExceptionsClassVisitor( + explicit ResolveCatchBlockExceptionsClassVisitor( std::set>& exceptions_to_resolve) : exceptions_to_resolve_(exceptions_to_resolve) {} @@ -1268,7 +1270,7 @@ void CompilerDriver::UpdateImageClasses(TimingLogger* timings) { bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) { Runtime* runtime = Runtime::Current(); if (!runtime->IsAotCompiler()) { - DCHECK(runtime->UseJit()); + DCHECK(runtime->UseJitCompilation()); // Having the klass reference here implies that the klass is already loaded. 
return true; } @@ -1289,7 +1291,7 @@ bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(Handle d if ((IsBootImage() && IsImageClass(dex_cache->GetDexFile()->StringDataByIdx( dex_cache->GetDexFile()->GetTypeId(type_idx).descriptor_idx_))) || - Runtime::Current()->UseJit()) { + Runtime::Current()->UseJitCompilation()) { mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); result = (resolved_class != nullptr); } @@ -1307,7 +1309,7 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, // See also Compiler::ResolveDexFile bool result = false; - if (IsBootImage() || Runtime::Current()->UseJit()) { + if (IsBootImage() || Runtime::Current()->UseJitCompilation()) { ScopedObjectAccess soa(Thread::Current()); StackHandleScope<1> hs(soa.Self()); ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); @@ -1319,7 +1321,7 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, result = true; } else { // Just check whether the dex cache already has the string. - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); result = (dex_cache->GetResolvedString(string_idx) != nullptr); } } @@ -1427,7 +1429,7 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i } else { return false; } - } else if (runtime->UseJit() && !heap->IsMovableObject(resolved_class)) { + } else if (runtime->UseJitCompilation() && !heap->IsMovableObject(resolved_class)) { *is_type_initialized = resolved_class->IsInitialized(); // If the class may move around, then don't embed it as a direct pointer. *use_direct_type_ptr = true; @@ -1604,7 +1606,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType } } } - if (runtime->UseJit()) { + if (runtime->UseJitCompilation()) { // If we are the JIT, then don't allow a direct call to the interpreter bridge since this will // never be updated even after we compile the method. 
if (cl->IsQuickToInterpreterBridge( @@ -1636,7 +1638,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType bool must_use_direct_pointers = false; mirror::DexCache* dex_cache = declaring_class->GetDexCache(); if (target_method->dex_file == dex_cache->GetDexFile() && - !(runtime->UseJit() && dex_cache->GetResolvedMethod( + !(runtime->UseJitCompilation() && dex_cache->GetResolvedMethod( method->GetDexMethodIndex(), pointer_size) == nullptr)) { target_method->dex_method_index = method->GetDexMethodIndex(); } else { @@ -1673,7 +1675,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType break; } } - if (method_in_image || compiling_boot || runtime->UseJit()) { + if (method_in_image || compiling_boot || runtime->UseJitCompilation()) { // We know we must be able to get to the method in the image, so use that pointer. // In the case where we are the JIT, we can always use direct pointers since we know where // the method and its code are / will be. We don't sharpen to interpreter bridge since we @@ -2440,9 +2442,12 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count); } -class InitializeArrayClassVisitor : public ClassVisitor { +class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor { public: virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) { + return true; + } if (klass->IsArrayClass()) { StackHandleScope<1> hs(Thread::Current()); Runtime::Current()->GetClassLinker()->EnsureInitialized(hs.Self(), @@ -2450,6 +2455,10 @@ class InitializeArrayClassVisitor : public ClassVisitor { true, true); } + // Create the conflict tables. 
+ if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) { + Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass); + } return true; } }; @@ -2462,13 +2471,15 @@ void CompilerDriver::InitializeClasses(jobject class_loader, CHECK(dex_file != nullptr); InitializeClasses(class_loader, *dex_file, dex_files, timings); } - { + if (boot_image_ || app_image_) { // Make sure that we call EnsureIntiailized on all the array classes to call // SetVerificationAttempted so that the access flags are set. If we do not do this they get // changed at runtime resulting in more dirty image pages. + // Also create conflict tables. + // Only useful if we are compiling an image (image_classes_ is not null). ScopedObjectAccess soa(Thread::Current()); - InitializeArrayClassVisitor visitor; - Runtime::Current()->GetClassLinker()->VisitClasses(&visitor); + InitializeArrayClassesAndCreateConflictTablesVisitor visitor; + Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor); } if (IsBootImage()) { // Prune garbage objects created during aborted transactions. diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index d63dffa49..19a1ecc49 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -92,6 +92,7 @@ class CompilerDriver { InstructionSet instruction_set, const InstructionSetFeatures* instruction_set_features, bool boot_image, + bool app_image, std::unordered_set* image_classes, std::unordered_set* compiled_classes, std::unordered_set* compiled_methods, @@ -652,6 +653,7 @@ class CompilerDriver { size_t non_relative_linker_patch_count_ GUARDED_BY(compiled_methods_lock_); const bool boot_image_; + const bool app_image_; // If image_ is true, specifies the classes that will be included in the image. // Note if image_classes_ is null, all classes are included in the image. 
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc index 1bd4c3ad8..f20dba34a 100644 --- a/compiler/driver/compiler_options.cc +++ b/compiler/driver/compiler_options.cc @@ -21,7 +21,7 @@ namespace art { CompilerOptions::CompilerOptions() - : compiler_filter_(kDefaultCompilerFilter), + : compiler_filter_(CompilerFilter::kDefaultCompilerFilter), huge_method_threshold_(kDefaultHugeMethodThreshold), large_method_threshold_(kDefaultLargeMethodThreshold), small_method_threshold_(kDefaultSmallMethodThreshold), diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h index c67ab6ef1..6bbd3c5a1 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -31,7 +31,6 @@ namespace art { class CompilerOptions FINAL { public: // Guide heuristics to determine whether to compile method if profile data not available. - static const CompilerFilter::Filter kDefaultCompilerFilter = CompilerFilter::kSpeed; static const size_t kDefaultHugeMethodThreshold = 10000; static const size_t kDefaultLargeMethodThreshold = 600; static const size_t kDefaultSmallMethodThreshold = 60; diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 8bb462c66..be720ad2f 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -653,8 +653,7 @@ bool ImageWriter::AllocMemory() { for (ImageInfo& image_info : image_infos_) { ImageSection unused_sections[ImageHeader::kSectionCount]; const size_t length = RoundUp( - image_info.CreateImageSections(target_ptr_size_, unused_sections), - kPageSize); + image_info.CreateImageSections(unused_sections), kPageSize); std::string error_msg; image_info.image_.reset(MemMap::MapAnonymous("image writer image", @@ -1029,6 +1028,9 @@ ObjectArray* ImageWriter::CreateImageRoots(size_t oat_index) const { for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) { mirror::DexCache* dex_cache = down_cast(self->DecodeJObject(data.weak_root)); + 
if (dex_cache == nullptr) { + continue; + } const DexFile* dex_file = dex_cache->GetDexFile(); if (!IsInBootImage(dex_cache)) { dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u; @@ -1045,6 +1047,9 @@ ObjectArray* ImageWriter::CreateImageRoots(size_t oat_index) const { for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) { mirror::DexCache* dex_cache = down_cast(self->DecodeJObject(data.weak_root)); + if (dex_cache == nullptr) { + continue; + } const DexFile* dex_file = dex_cache->GetDexFile(); if (!IsInBootImage(dex_cache)) { non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u; @@ -1056,6 +1061,9 @@ ObjectArray* ImageWriter::CreateImageRoots(size_t oat_index) const { for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) { mirror::DexCache* dex_cache = down_cast(self->DecodeJObject(data.weak_root)); + if (dex_cache == nullptr) { + continue; + } const DexFile* dex_file = dex_cache->GetDexFile(); if (!IsInBootImage(dex_cache) && image_dex_files.find(dex_file) != image_dex_files.end()) { dex_caches->Set(i, dex_cache); @@ -1215,6 +1223,19 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { } (any_dirty ? dirty_methods_ : clean_methods_) += num_methods; } + // Assign offsets for all runtime methods in the IMT since these may hold conflict tables + // live. + if (as_klass->ShouldHaveEmbeddedImtAndVTable()) { + for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { + ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_); + DCHECK(imt_method != nullptr); + if (imt_method->IsRuntimeMethod() && + !IsInBootImage(imt_method) && + !NativeRelocationAssigned(imt_method)) { + AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index); + } + } + } } else if (h_obj->IsObjectArray()) { // Walk elements of an object array. 
int32_t length = h_obj->AsObjectArray()->GetLength(); @@ -1237,13 +1258,37 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { } } +bool ImageWriter::NativeRelocationAssigned(void* ptr) const { + return native_object_relocations_.find(ptr) != native_object_relocations_.end(); +} + +void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) { + // No offset, or already assigned. + if (table == nullptr || NativeRelocationAssigned(table)) { + return; + } + CHECK(!IsInBootImage(table)); + // If the method is a conflict method we also want to assign the conflict table offset. + ImageInfo& image_info = GetImageInfo(oat_index); + const size_t size = table->ComputeSize(target_ptr_size_); + native_object_relocations_.emplace( + table, + NativeObjectRelocation { + oat_index, + image_info.bin_slot_sizes_[kBinIMTConflictTable], + kNativeObjectRelocationTypeIMTConflictTable}); + image_info.bin_slot_sizes_[kBinIMTConflictTable] += size; +} + void ImageWriter::AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type, size_t oat_index) { DCHECK(!IsInBootImage(method)); - auto it = native_object_relocations_.find(method); - CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned " + CHECK(!NativeRelocationAssigned(method)) << "Method " << method << " already assigned " << PrettyMethod(method); + if (method->IsRuntimeMethod()) { + TryAssignConflictTableOffset(method->GetImtConflictTable(target_ptr_size_), oat_index); + } ImageInfo& image_info = GetImageInfo(oat_index); size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)]; native_object_relocations_.emplace(method, NativeObjectRelocation { oat_index, offset, type }); @@ -1292,8 +1337,7 @@ void ImageWriter::CalculateNewObjectOffsets() { // know where image_roots is going to end up image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment - // Clear any pre-existing monitors which 
may have been in the monitor words, assign bin slots. - heap->VisitObjects(WalkFieldsCallback, this); + const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_); // Write the image runtime methods. image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod(); image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod(); @@ -1303,31 +1347,19 @@ void ImageWriter::CalculateNewObjectOffsets() { runtime->GetCalleeSaveMethod(Runtime::kRefsOnly); image_methods_[ImageHeader::kRefsAndArgsSaveMethod] = runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs); - - // Add room for fake length prefixed array for holding the image methods. - const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean; - auto it = native_object_relocations_.find(&image_method_array_); - CHECK(it == native_object_relocations_.end()); - ImageInfo& default_image_info = GetImageInfo(GetDefaultOatIndex()); - size_t& offset = - default_image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)]; - if (!compile_app_image_) { - native_object_relocations_.emplace(&image_method_array_, - NativeObjectRelocation { GetDefaultOatIndex(), offset, image_method_type }); - } - size_t method_alignment = ArtMethod::Alignment(target_ptr_size_); - const size_t array_size = LengthPrefixedArray::ComputeSize( - 0, ArtMethod::Size(target_ptr_size_), method_alignment); - CHECK_ALIGNED_PARAM(array_size, method_alignment); - offset += array_size; + // Visit image methods first to have the main runtime methods in the first image. 
for (auto* m : image_methods_) { CHECK(m != nullptr); CHECK(m->IsRuntimeMethod()); DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image"; if (!IsInBootImage(m)) { - AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean, GetDefaultOatIndex()); + AssignMethodOffset(m, kNativeObjectRelocationTypeRuntimeMethod, GetDefaultOatIndex()); } } + + // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots. + heap->VisitObjects(WalkFieldsCallback, this); + // Calculate size of the dex cache arrays slot and prepare offsets. PrepareDexCacheArraySlots(); @@ -1346,15 +1378,22 @@ void ImageWriter::CalculateNewObjectOffsets() { for (ImageInfo& image_info : image_infos_) { size_t bin_offset = image_objects_offset_begin_; for (size_t i = 0; i != kBinSize; ++i) { + switch (i) { + case kBinArtMethodClean: + case kBinArtMethodDirty: { + bin_offset = RoundUp(bin_offset, method_alignment); + break; + } + case kBinIMTConflictTable: { + bin_offset = RoundUp(bin_offset, target_ptr_size_); + break; + } + default: { + // Normal alignment. + } + } image_info.bin_slot_offsets_[i] = bin_offset; bin_offset += image_info.bin_slot_sizes_[i]; - if (i == kBinArtField) { - static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields."); - static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4."); - DCHECK_ALIGNED(bin_offset, 4u); - DCHECK(method_alignment == 4u || method_alignment == 8u); - bin_offset = RoundUp(bin_offset, method_alignment); - } } // NOTE: There may be additional padding between the bin slots and the intern table. 
DCHECK_EQ(image_info.image_end_, @@ -1367,9 +1406,7 @@ void ImageWriter::CalculateNewObjectOffsets() { image_info.image_begin_ = global_image_begin_ + image_offset; image_info.image_offset_ = image_offset; ImageSection unused_sections[ImageHeader::kSectionCount]; - image_info.image_size_ = RoundUp( - image_info.CreateImageSections(target_ptr_size_, unused_sections), - kPageSize); + image_info.image_size_ = RoundUp(image_info.CreateImageSections(unused_sections), kPageSize); // There should be no gaps until the next image. image_offset += image_info.image_size_; } @@ -1396,42 +1433,52 @@ void ImageWriter::CalculateNewObjectOffsets() { // Note that image_info.image_end_ is left at end of used mirror object section. } -size_t ImageWriter::ImageInfo::CreateImageSections(size_t target_ptr_size, - ImageSection* out_sections) const { +size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const { DCHECK(out_sections != nullptr); + + // Do not round up any sections here that are represented by the bins since it will break + // offsets. + // Objects section - auto* objects_section = &out_sections[ImageHeader::kSectionObjects]; + ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects]; *objects_section = ImageSection(0u, image_end_); - size_t cur_pos = objects_section->End(); + // Add field section. - auto* field_section = &out_sections[ImageHeader::kSectionArtFields]; - *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]); + ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields]; + *field_section = ImageSection(bin_slot_offsets_[kBinArtField], bin_slot_sizes_[kBinArtField]); CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset()); - cur_pos = field_section->End(); - // Round up to the alignment the required by the method section. - cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size)); + // Add method section. 
- auto* methods_section = &out_sections[ImageHeader::kSectionArtMethods]; - *methods_section = ImageSection(cur_pos, - bin_slot_sizes_[kBinArtMethodClean] + - bin_slot_sizes_[kBinArtMethodDirty]); - CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset()); - cur_pos = methods_section->End(); + ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods]; + *methods_section = ImageSection( + bin_slot_offsets_[kBinArtMethodClean], + bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]); + + // Conflict tables section. + ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables]; + *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable], + bin_slot_sizes_[kBinIMTConflictTable]); + + // Runtime methods section. + ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods]; + *runtime_methods_section = ImageSection(bin_slot_offsets_[kBinRuntimeMethod], + bin_slot_sizes_[kBinRuntimeMethod]); + // Add dex cache arrays section. - auto* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays]; - *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]); - CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset()); - cur_pos = dex_cache_arrays_section->End(); + ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays]; + *dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray], + bin_slot_sizes_[kBinDexCacheArray]); + // Round up to the alignment the string table expects. See HashSet::WriteToMemory. - cur_pos = RoundUp(cur_pos, sizeof(uint64_t)); + size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t)); // Calculate the size of the interned strings. 
- auto* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings]; + ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings]; *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_); cur_pos = interned_strings_section->End(); // Round up to the alignment the class table expects. See HashSet::WriteToMemory. cur_pos = RoundUp(cur_pos, sizeof(uint64_t)); // Calculate the size of the class table section. - auto* class_table_section = &out_sections[ImageHeader::kSectionClassTable]; + ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable]; *class_table_section = ImageSection(cur_pos, class_table_bytes_); cur_pos = class_table_section->End(); // Image end goes right before the start of the image bitmap. @@ -1446,7 +1493,7 @@ void ImageWriter::CreateHeader(size_t oat_index) { // Create the image sections. ImageSection sections[ImageHeader::kSectionCount]; - const size_t image_end = image_info.CreateImageSections(target_ptr_size_, sections); + const size_t image_end = image_info.CreateImageSections(sections); // Finally bitmap section. 
const size_t bitmap_bytes = image_info.image_bitmap_->Size(); @@ -1531,8 +1578,20 @@ class FixupRootVisitor : public RootVisitor { ImageWriter* const image_writer_; }; +void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) { + const size_t count = orig->NumEntries(target_ptr_size_); + for (size_t i = 0; i < count; ++i) { + ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_); + ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_); + copy->SetInterfaceMethod(i, target_ptr_size_, NativeLocationInImage(interface_method)); + copy->SetImplementationMethod(i, + target_ptr_size_, + NativeLocationInImage(implementation_method)); + } +} + void ImageWriter::CopyAndFixupNativeData(size_t oat_index) { - ImageInfo& image_info = GetImageInfo(oat_index); + const ImageInfo& image_info = GetImageInfo(oat_index); // Copy ArtFields and methods to their locations and update the array for convenience. for (auto& pair : native_object_relocations_) { NativeObjectRelocation& relocation = pair.second; @@ -1550,6 +1609,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) { GetImageAddress(reinterpret_cast(pair.first)->GetDeclaringClass())); break; } + case kNativeObjectRelocationTypeRuntimeMethod: case kNativeObjectRelocationTypeArtMethodClean: case kNativeObjectRelocationTypeArtMethodDirty: { CopyAndFixupMethod(reinterpret_cast(pair.first), @@ -1575,26 +1635,22 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) { case kNativeObjectRelocationTypeDexCacheArray: // Nothing to copy here, everything is done in FixupDexCache(). break; + case kNativeObjectRelocationTypeIMTConflictTable: { + auto* orig_table = reinterpret_cast(pair.first); + CopyAndFixupImtConflictTable( + orig_table, + new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_)); + break; + } } } // Fixup the image method roots. 
auto* image_header = reinterpret_cast(image_info.image_->Begin()); - const ImageSection& methods_section = image_header->GetMethodsSection(); for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) { ArtMethod* method = image_methods_[i]; CHECK(method != nullptr); - // Only place runtime methods in the image of the default oat file. - if (method->IsRuntimeMethod() && oat_index != GetDefaultOatIndex()) { - continue; - } if (!IsInBootImage(method)) { - auto it = native_object_relocations_.find(method); - CHECK(it != native_object_relocations_.end()) << "No forwarding for " << PrettyMethod(method); - NativeObjectRelocation& relocation = it->second; - CHECK(methods_section.Contains(relocation.offset)) << relocation.offset << " not in " - << methods_section; - CHECK(relocation.IsArtMethodRelocation()) << relocation.type; - method = reinterpret_cast(global_image_begin_ + it->second.offset); + method = NativeLocationInImage(method); } image_header->SetImageMethod(static_cast(i), method); } @@ -2057,24 +2113,28 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, // The resolution method has a special trampoline to call. 
Runtime* runtime = Runtime::Current(); - if (UNLIKELY(orig == runtime->GetResolutionMethod())) { - copy->SetEntryPointFromQuickCompiledCodePtrSize( - GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_); - } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() || - orig == runtime->GetImtUnimplementedMethod())) { - copy->SetEntryPointFromQuickCompiledCodePtrSize( - GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_); - } else if (UNLIKELY(orig->IsRuntimeMethod())) { - bool found_one = false; - for (size_t i = 0; i < static_cast(Runtime::kLastCalleeSaveType); ++i) { - auto idx = static_cast(i); - if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) { - found_one = true; - break; + if (orig->IsRuntimeMethod()) { + ImtConflictTable* orig_table = orig->GetImtConflictTable(target_ptr_size_); + if (orig_table != nullptr) { + // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method. + copy->SetEntryPointFromQuickCompiledCodePtrSize( + GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_); + copy->SetImtConflictTable(NativeLocationInImage(orig_table), target_ptr_size_); + } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) { + copy->SetEntryPointFromQuickCompiledCodePtrSize( + GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_); + } else { + bool found_one = false; + for (size_t i = 0; i < static_cast(Runtime::kLastCalleeSaveType); ++i) { + auto idx = static_cast(i); + if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) { + found_one = true; + break; + } } + CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig); + CHECK(copy->IsRuntimeMethod()); } - CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig); - CHECK(copy->IsRuntimeMethod()); } else { // We assume all methods have code. 
If they don't currently then we set them to the use the // resolution trampoline. Abstract methods never have code and so we need to make sure their @@ -2141,6 +2201,10 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat return kBinArtMethodDirty; case kNativeObjectRelocationTypeDexCacheArray: return kBinDexCacheArray; + case kNativeObjectRelocationTypeRuntimeMethod: + return kBinRuntimeMethod; + case kNativeObjectRelocationTypeIMTConflictTable: + return kBinIMTConflictTable; } UNREACHABLE(); } @@ -2242,7 +2306,6 @@ ImageWriter::ImageWriter( compile_app_image_(compile_app_image), target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())), image_infos_(oat_filenames.size()), - image_method_array_(ImageHeader::kImageMethodsCount), dirty_methods_(0u), clean_methods_(0u), image_storage_mode_(image_storage_mode), diff --git a/compiler/image_writer.h b/compiler/image_writer.h index 0cb6aea9b..51976c511 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -169,6 +169,10 @@ class ImageWriter FINAL { // ArtMethods may be dirty if the class has native methods or a declaring class that isn't // initialized. kBinArtMethodDirty, + // Conflict tables (clean). + kBinIMTConflictTable, + // Runtime methods (always clean, do not have a length prefix array). + kBinRuntimeMethod, // Dex cache arrays have a special slot for PC-relative addressing. Since they are // huge, and as such their dirtiness is not important for the clean/dirty separation, // we arbitrarily keep them at the end of the native data. 
@@ -186,6 +190,8 @@ class ImageWriter FINAL { kNativeObjectRelocationTypeArtMethodArrayClean, kNativeObjectRelocationTypeArtMethodDirty, kNativeObjectRelocationTypeArtMethodArrayDirty, + kNativeObjectRelocationTypeRuntimeMethod, + kNativeObjectRelocationTypeIMTConflictTable, kNativeObjectRelocationTypeDexCacheArray, }; friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type); @@ -240,7 +246,7 @@ class ImageWriter FINAL { // Create the image sections into the out sections variable, returns the size of the image // excluding the bitmap. - size_t CreateImageSections(size_t target_ptr_size, ImageSection* out_sections) const; + size_t CreateImageSections(ImageSection* out_sections) const; std::unique_ptr image_; // Memory mapped for generating the image. @@ -395,6 +401,8 @@ class ImageWriter FINAL { void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info) SHARED_REQUIRES(Locks::mutator_lock_); + void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) + SHARED_REQUIRES(Locks::mutator_lock_); void FixupClass(mirror::Class* orig, mirror::Class* copy) SHARED_REQUIRES(Locks::mutator_lock_); void FixupObject(mirror::Object* orig, mirror::Object* copy) @@ -425,6 +433,11 @@ class ImageWriter FINAL { size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_); + // Assign the offset for an IMT conflict table. Does nothing if the table already has a native + // relocation. + void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) + SHARED_REQUIRES(Locks::mutator_lock_); + // Return true if klass is loaded by the boot class loader but not in the boot image. bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); @@ -481,6 +494,9 @@ class ImageWriter FINAL { // remove duplicates in the multi image and app image case. 
mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_); + // Return true if there already exists a native allocation for an object. + bool NativeRelocationAssigned(void* ptr) const; + const CompilerDriver& compiler_driver_; // Beginning target image address for the first image. @@ -517,16 +533,14 @@ class ImageWriter FINAL { bool IsArtMethodRelocation() const { return type == kNativeObjectRelocationTypeArtMethodClean || - type == kNativeObjectRelocationTypeArtMethodDirty; + type == kNativeObjectRelocationTypeArtMethodDirty || + type == kNativeObjectRelocationTypeRuntimeMethod; } }; std::unordered_map native_object_relocations_; // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image. ArtMethod* image_methods_[ImageHeader::kImageMethodsCount]; - // Fake length prefixed array for image methods. This array does not contain the actual - // ArtMethods. We only use it for the header and relocation addresses. - LengthPrefixedArray image_method_array_; // Counters for measurements, used for logging only. uint64_t dirty_methods_; diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index 5de9842d9..178533849 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -88,7 +88,7 @@ NO_RETURN static void Usage(const char* fmt, ...) 
{ JitCompiler::JitCompiler() { compiler_options_.reset(new CompilerOptions( - CompilerOptions::kDefaultCompilerFilter, + CompilerFilter::kDefaultCompilerFilter, CompilerOptions::kDefaultHugeMethodThreshold, CompilerOptions::kDefaultLargeMethodThreshold, CompilerOptions::kDefaultSmallMethodThreshold, @@ -155,7 +155,8 @@ JitCompiler::JitCompiler() { Compiler::kOptimizing, instruction_set, instruction_set_features_.get(), - /* image */ false, + /* boot_image */ false, + /* app_image */ false, /* image_classes */ nullptr, /* compiled_classes */ nullptr, /* compiled_methods */ nullptr, diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index cf836a9c9..c4c2399cc 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -31,6 +31,7 @@ #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "mirror/stack_trace_element.h" +#include "nativeloader/native_loader.h" #include "runtime.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" @@ -53,6 +54,11 @@ class JniCompilerTest : public CommonCompilerTest { check_generic_jni_ = false; } + void TearDown() OVERRIDE { + android::ResetNativeLoader(); + CommonCompilerTest::TearDown(); + } + void SetCheckGenericJni(bool generic) { check_generic_jni_ = generic; } @@ -92,11 +98,13 @@ class JniCompilerTest : public CommonCompilerTest { CompileForTest(class_loader_, direct, method_name, method_sig); // Start runtime. Thread::Current()->TransitionFromSuspendedToRunnable(); + android::InitializeNativeLoader(); bool started = runtime_->Start(); CHECK(started); } // JNI operations after runtime start. 
env_ = Thread::Current()->GetJniEnv(); + library_search_path_ = env_->NewStringUTF(""); jklass_ = env_->FindClass("MyClassNatives"); ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig; @@ -168,6 +176,7 @@ class JniCompilerTest : public CommonCompilerTest { void StackArgsSignExtendedMips64Impl(); JNIEnv* env_; + jstring library_search_path_; jmethodID jmethod_; bool check_generic_jni_; }; @@ -220,7 +229,7 @@ void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() { std::string reason; ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> - LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason)) + LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason)) << reason; jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24); @@ -235,7 +244,7 @@ void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() { std::string reason; ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> - LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason)) + LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason)) << reason; jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42); diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h index c07de7998..ec69107d8 100644 --- a/compiler/linker/relative_patcher_test.h +++ b/compiler/linker/relative_patcher_test.h @@ -51,6 +51,7 @@ class RelativePatcherTest : public testing::Test { instruction_set, /* instruction_set_features*/ nullptr, /* boot_image */ false, + /* app_image */ false, /* image_classes */ nullptr, /* compiled_classes */ nullptr, /* compiled_methods */ nullptr, diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 73b16d5b4..5b192846b 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -112,6 +112,7 @@ class OatTest : public CommonCompilerTest { insn_set, insn_features_.get(), /* boot_image */ false, + /* app_image */ false, /* image_classes */ nullptr, /* compiled_classes */ nullptr, /* 
compiled_methods */ nullptr, diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index e804beef0..8da9f06dd 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -1127,17 +1127,23 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { return target_offset; } - mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) { - mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile()) + mirror::DexCache* GetDexCache(const DexFile* target_dex_file) + SHARED_REQUIRES(Locks::mutator_lock_) { + return (target_dex_file == dex_file_) ? dex_cache_ - : class_linker_->FindDexCache(Thread::Current(), *patch.TargetTypeDexFile()); + : class_linker_->FindDexCache(Thread::Current(), *target_dex_file); + } + + mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) { + mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile()); mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex()); CHECK(type != nullptr); return type; } mirror::String* GetTargetString(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) { - mirror::String* string = dex_cache_->GetResolvedString(patch.TargetStringIndex()); + mirror::DexCache* dex_cache = GetDexCache(patch.TargetStringDexFile()); + mirror::String* string = dex_cache->GetResolvedString(patch.TargetStringIndex()); DCHECK(string != nullptr); DCHECK(writer_->HasBootImage() || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string)); diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc index 659c6f849..6c6e5af0b 100644 --- a/compiler/optimizing/bounds_check_elimination.cc +++ b/compiler/optimizing/bounds_check_elimination.cc @@ -552,7 +552,11 @@ class BCEVisitor : public HGraphVisitor { DCHECK(!IsAddedBlock(block)); first_index_bounds_check_map_.clear(); HGraphVisitor::VisitBasicBlock(block); - 
AddComparesWithDeoptimization(block); + // We should never deoptimize from an osr method, otherwise we might wrongly optimize + // code dominated by the deoptimization. + if (!GetGraph()->IsCompilingOsr()) { + AddComparesWithDeoptimization(block); + } } void Finish() { @@ -796,6 +800,27 @@ class BCEVisitor : public HGraphVisitor { ValueRange(GetGraph()->GetArena(), ValueBound::Min(), new_upper); ApplyRangeFromComparison(left, block, false_successor, new_range); } + } else if (cond == kCondNE || cond == kCondEQ) { + if (left->IsArrayLength() && lower.IsConstant() && upper.IsConstant()) { + // Special case: + // length == [c,d] yields [c, d] along true + // length != [c,d] yields [c, d] along false + if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) { + ValueRange* new_range = new (GetGraph()->GetArena()) + ValueRange(GetGraph()->GetArena(), lower, upper); + ApplyRangeFromComparison( + left, block, cond == kCondEQ ? true_successor : false_successor, new_range); + } + // In addition: + // length == 0 yields [1, max] along false + // length != 0 yields [1, max] along true + if (lower.GetConstant() == 0 && upper.GetConstant() == 0) { + ValueRange* new_range = new (GetGraph()->GetArena()) + ValueRange(GetGraph()->GetArena(), ValueBound(nullptr, 1), ValueBound::Max()); + ApplyRangeFromComparison( + left, block, cond == kCondEQ ? 
false_successor : true_successor, new_range); + } + } } } @@ -951,13 +976,7 @@ class BCEVisitor : public HGraphVisitor { void VisitIf(HIf* instruction) OVERRIDE { if (instruction->InputAt(0)->IsCondition()) { HCondition* cond = instruction->InputAt(0)->AsCondition(); - IfCondition cmp = cond->GetCondition(); - if (cmp == kCondGT || cmp == kCondGE || - cmp == kCondLT || cmp == kCondLE) { - HInstruction* left = cond->GetLeft(); - HInstruction* right = cond->GetRight(); - HandleIf(instruction, left, right, cmp); - } + HandleIf(instruction, cond->GetLeft(), cond->GetRight(), cond->GetCondition()); } } @@ -1358,6 +1377,11 @@ class BCEVisitor : public HGraphVisitor { if (loop->IsIrreducible()) { return false; } + // We should never deoptimize from an osr method, otherwise we might wrongly optimize + // code dominated by the deoptimization. + if (GetGraph()->IsCompilingOsr()) { + return false; + } // A try boundary preheader is hard to handle. // TODO: remove this restriction. if (loop->GetPreHeader()->GetLastInstruction()->IsTryBoundary()) { diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index e7fa4e472..51fbaea51 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -50,6 +50,7 @@ #include "mirror/array-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object_reference.h" +#include "mirror/string.h" #include "parallel_move_resolver.h" #include "ssa_liveness_analysis.h" #include "utils/assembler.h" @@ -139,6 +140,12 @@ size_t CodeGenerator::GetCachePointerOffset(uint32_t index) { return pointer_size * index; } +uint32_t CodeGenerator::GetArrayLengthOffset(HArrayLength* array_length) { + return array_length->IsStringLength() + ? 
mirror::String::CountOffset().Uint32Value() + : mirror::Array::LengthOffset().Uint32Value(); +} + bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const { DCHECK_EQ((*block_order_)[current_block_index_], current); return GetNextBlockToEmit() == FirstNonEmptyBlock(next); diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index d69c41055..6e75e3bb2 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -340,6 +340,11 @@ class CodeGenerator : public DeletableArenaObject { // Pointer variant for ArtMethod and ArtField arrays. size_t GetCachePointerOffset(uint32_t index); + // Helper that returns the offset of the array's length field. + // Note: Besides the normal arrays, we also use the HArrayLength for + // accessing the String's `count` field in String intrinsics. + static uint32_t GetArrayLengthOffset(HArrayLength* array_length); + void EmitParallelMoves(Location from1, Location to1, Primitive::Type type1, diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 45d23fe51..e0106628c 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -4742,7 +4742,7 @@ void LocationsBuilderARM::VisitArrayLength(HArrayLength* instruction) { void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) { LocationSummary* locations = instruction->GetLocations(); - uint32_t offset = mirror::Array::LengthOffset().Uint32Value(); + uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction); Register obj = locations->InAt(0).AsRegister(); Register out = locations->Out().AsRegister(); __ LoadFromOffset(kLoadWord, out, obj, offset); @@ -5183,10 +5183,10 @@ HLoadString::LoadKind CodeGeneratorARM::GetSupportedLoadStringKind( case HLoadString::LoadKind::kBootImageAddress: break; case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJit()); + 
DCHECK(Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCachePcRelative: - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); // We disable pc-relative load when there is an irreducible loop, as the optimization // is incompatible with it. // TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index e8e6b6897..261c04f06 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -2118,9 +2118,9 @@ void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) { } void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) { + uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction); BlockPoolsScope block_pools(GetVIXLAssembler()); - __ Ldr(OutputRegister(instruction), - HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset())); + __ Ldr(OutputRegister(instruction), HeapOperand(InputRegisterAt(instruction, 0), offset)); codegen_->MaybeRecordImplicitNullCheck(instruction); } @@ -4010,10 +4010,10 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind( case HLoadString::LoadKind::kBootImageAddress: break; case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCachePcRelative: - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCacheViaMethod: break; diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 12d1164d0..fb50680c9 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -1803,7 +1803,7 @@ void 
LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) { void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) { LocationSummary* locations = instruction->GetLocations(); - uint32_t offset = mirror::Array::LengthOffset().Uint32Value(); + uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction); Register obj = locations->InAt(0).AsRegister(); Register out = locations->Out().AsRegister(); __ LoadFromOffset(kLoadWord, out, obj, offset); diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 56ac38ef8..e67d8d0dc 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -1426,7 +1426,7 @@ void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) { void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) { LocationSummary* locations = instruction->GetLocations(); - uint32_t offset = mirror::Array::LengthOffset().Uint32Value(); + uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction); GpuRegister obj = locations->InAt(0).AsRegister(); GpuRegister out = locations->Out().AsRegister(); __ LoadFromOffset(kLoadWord, out, obj, offset); diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index e73e88030..50892a9d4 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -3303,17 +3303,6 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation int shift; CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift); - NearLabel ndiv; - NearLabel end; - // If numerator is 0, the result is 0, no computation needed. - __ testl(eax, eax); - __ j(kNotEqual, &ndiv); - - __ xorl(out, out); - __ jmp(&end); - - __ Bind(&ndiv); - // Save the numerator. 
__ movl(num, eax); @@ -3348,7 +3337,6 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation } else { __ movl(eax, edx); } - __ Bind(&end); } void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instruction) { @@ -5492,7 +5480,7 @@ void LocationsBuilderX86::VisitArrayLength(HArrayLength* instruction) { void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) { LocationSummary* locations = instruction->GetLocations(); - uint32_t offset = mirror::Array::LengthOffset().Uint32Value(); + uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction); Register obj = locations->InAt(0).AsRegister(); Register out = locations->Out().AsRegister(); __ movl(out, Address(obj, offset)); @@ -5977,7 +5965,7 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind( DCHECK(GetCompilerOptions().GetCompilePic()); FALLTHROUGH_INTENDED; case HLoadString::LoadKind::kDexCachePcRelative: - DCHECK(!Runtime::Current()->UseJit()); // Note: boot image is also non-JIT. + DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT. // We disable pc-relative load when there is an irreducible loop, as the optimization // is incompatible with it. 
// TODO: Create as many X86ComputeBaseMethodAddress instructions as needed for methods @@ -5989,7 +5977,7 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind( case HLoadString::LoadKind::kBootImageAddress: break; case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCacheViaMethod: break; diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 5576d839c..56c5b0694 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -3390,16 +3390,6 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat __ movl(numerator, eax); - NearLabel no_div; - NearLabel end; - __ testl(eax, eax); - __ j(kNotEqual, &no_div); - - __ xorl(out, out); - __ jmp(&end); - - __ Bind(&no_div); - __ movl(eax, Immediate(magic)); __ imull(numerator); @@ -3425,7 +3415,6 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat } else { __ movl(eax, edx); } - __ Bind(&end); } else { int64_t imm = second.GetConstant()->AsLongConstant()->GetValue(); @@ -4967,7 +4956,7 @@ void LocationsBuilderX86_64::VisitArrayLength(HArrayLength* instruction) { void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction) { LocationSummary* locations = instruction->GetLocations(); - uint32_t offset = mirror::Array::LengthOffset().Uint32Value(); + uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction); CpuRegister obj = locations->InAt(0).AsRegister(); CpuRegister out = locations->Out().AsRegister(); __ movl(out, Address(obj, offset)); @@ -5413,10 +5402,10 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind( case HLoadString::LoadKind::kBootImageAddress: break; case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJit()); + 
DCHECK(Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCachePcRelative: - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCacheViaMethod: break; diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc index 5f1102499..49cfff46d 100644 --- a/compiler/optimizing/dead_code_elimination.cc +++ b/compiler/optimizing/dead_code_elimination.cc @@ -23,7 +23,7 @@ namespace art { static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) { - ArenaVector worklist(graph->GetArena()->Adapter()); + ArenaVector worklist(graph->GetArena()->Adapter(kArenaAllocDCE)); constexpr size_t kDefaultWorlistSize = 8; worklist.reserve(kDefaultWorlistSize); visited->SetBit(graph->GetEntryBlock()->GetBlockId()); diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc index 96837a826..968e26724 100644 --- a/compiler/optimizing/graph_checker.cc +++ b/compiler/optimizing/graph_checker.cc @@ -258,6 +258,15 @@ void GraphChecker::VisitBoundsCheck(HBoundsCheck* check) { VisitInstruction(check); } +void GraphChecker::VisitDeoptimize(HDeoptimize* deopt) { + if (GetGraph()->IsCompilingOsr()) { + AddError(StringPrintf("A graph compiled OSR cannot have a HDeoptimize instruction")); + } + + // Perform the instruction base checks too. 
+ VisitInstruction(deopt); +} + void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) { ArrayRef handlers = try_boundary->GetExceptionHandlers(); diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h index 83b198474..3060c8007 100644 --- a/compiler/optimizing/graph_checker.h +++ b/compiler/optimizing/graph_checker.h @@ -57,6 +57,7 @@ class GraphChecker : public HGraphDelegateVisitor { void VisitCheckCast(HCheckCast* check) OVERRIDE; void VisitCondition(HCondition* op) OVERRIDE; void VisitConstant(HConstant* instruction) OVERRIDE; + void VisitDeoptimize(HDeoptimize* instruction) OVERRIDE; void VisitIf(HIf* instruction) OVERRIDE; void VisitInstanceOf(HInstanceOf* check) OVERRIDE; void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE; diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index 9efc13f61..6aec46354 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -98,7 +98,9 @@ typedef Disassembler* create_disasm_prototype(InstructionSet instruction_set, DisassemblerOptions* options); class HGraphVisualizerDisassembler { public: - HGraphVisualizerDisassembler(InstructionSet instruction_set, const uint8_t* base_address) + HGraphVisualizerDisassembler(InstructionSet instruction_set, + const uint8_t* base_address, + const uint8_t* end_address) : instruction_set_(instruction_set), disassembler_(nullptr) { libart_disassembler_handle_ = dlopen(kIsDebugBuild ? "libartd-disassembler.so" : "libart-disassembler.so", RTLD_NOW); @@ -119,6 +121,7 @@ class HGraphVisualizerDisassembler { instruction_set, new DisassemblerOptions(/* absolute_addresses */ false, base_address, + end_address, /* can_read_literals */ true))); } @@ -174,7 +177,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { disassembler_(disasm_info_ != nullptr ? 
new HGraphVisualizerDisassembler( codegen_.GetInstructionSet(), - codegen_.GetAssembler().CodeBufferBaseAddress()) + codegen_.GetAssembler().CodeBufferBaseAddress(), + codegen_.GetAssembler().CodeBufferBaseAddress() + + codegen_.GetAssembler().CodeSize()) : nullptr), indent_(0) {} @@ -389,6 +394,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { << instance_of->MustDoNullCheck() << std::noboolalpha; } + void VisitArrayLength(HArrayLength* array_length) OVERRIDE { + StartAttributeStream("is_string_length") << std::boolalpha + << array_length->IsStringLength() << std::noboolalpha; + } + void VisitArraySet(HArraySet* array_set) OVERRIDE { StartAttributeStream("value_can_be_null") << std::boolalpha << array_set->GetValueCanBeNull() << std::noboolalpha; @@ -544,26 +554,19 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { } } - if (IsPass(LICM::kLoopInvariantCodeMotionPassName) - || IsPass(HDeadCodeElimination::kFinalDeadCodeEliminationPassName) - || IsPass(HDeadCodeElimination::kInitialDeadCodeEliminationPassName) - || IsPass(BoundsCheckElimination::kBoundsCheckEliminationPassName) - || IsPass(RegisterAllocator::kRegisterAllocatorPassName) - || IsPass(HGraphBuilder::kBuilderPassName)) { - HLoopInformation* info = instruction->GetBlock()->GetLoopInformation(); - if (info == nullptr) { - StartAttributeStream("loop") << "none"; + HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation(); + if (loop_info == nullptr) { + StartAttributeStream("loop") << "none"; + } else { + StartAttributeStream("loop") << "B" << loop_info->GetHeader()->GetBlockId(); + HLoopInformation* outer = loop_info->GetPreHeader()->GetLoopInformation(); + if (outer != nullptr) { + StartAttributeStream("outer_loop") << "B" << outer->GetHeader()->GetBlockId(); } else { - StartAttributeStream("loop") << "B" << info->GetHeader()->GetBlockId(); - HLoopInformation* outer = info->GetPreHeader()->GetLoopInformation(); - if (outer != nullptr) { - 
StartAttributeStream("outer_loop") << "B" << outer->GetHeader()->GetBlockId(); - } else { - StartAttributeStream("outer_loop") << "none"; - } - StartAttributeStream("irreducible") - << std::boolalpha << info->IsIrreducible() << std::noboolalpha; + StartAttributeStream("outer_loop") << "none"; } + StartAttributeStream("irreducible") + << std::boolalpha << loop_info->IsIrreducible() << std::noboolalpha; } if ((IsPass(HGraphBuilder::kBuilderPassName) diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc index d0d52bf6c..1e86b7507 100644 --- a/compiler/optimizing/gvn.cc +++ b/compiler/optimizing/gvn.cc @@ -454,11 +454,16 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) { if (!set->IsEmpty()) { if (block->IsLoopHeader()) { - if (block->GetLoopInformation()->IsIrreducible()) { + if (block->GetLoopInformation()->ContainsIrreducibleLoop()) { // To satisfy our linear scan algorithm, no instruction should flow in an irreducible - // loop header. + // loop header. We clear the set at entry of irreducible loops and any loop containing + // an irreducible loop, as in both cases, GVN can extend the liveness of an instruction + // across the irreducible loop. + // Note that, if we're not compiling OSR, we could still do GVN and introduce + // phis at irreducible loop headers. We decided it was not worth the complexity. set->Clear(); } else { + DCHECK(!block->GetLoopInformation()->IsIrreducible()); DCHECK_EQ(block->GetDominator(), block->GetLoopInformation()->GetPreHeader()); set->Kill(side_effects_.GetLoopEffects(block)); } diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index ff4b9a765..59de89518 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -308,7 +308,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { // Check if we can use an inline cache. 
ArtMethod* caller = graph_->GetArtMethod(); - if (Runtime::Current()->UseJit()) { + if (Runtime::Current()->UseJitCompilation()) { // Under JIT, we should always know the caller. DCHECK(caller != nullptr); ScopedProfilingInfoInlineUse spiis(caller, soa.Self()); @@ -322,7 +322,13 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { return false; } else if (ic.IsMonomorphic()) { MaybeRecordStat(kMonomorphicCall); - return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic); + if (outermost_graph_->IsCompilingOsr()) { + // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the + // interpreter and it may have seen different receiver types. + return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic); + } else { + return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic); + } } else if (ic.IsPolymorphic()) { MaybeRecordStat(kPolymorphicCall); return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic); @@ -510,6 +516,11 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, bool deoptimize = all_targets_inlined && (i != InlineCache::kIndividualCacheSize - 1) && (ic.GetTypeAt(i + 1) == nullptr); + + if (outermost_graph_->IsCompilingOsr()) { + // We do not support HDeoptimize in OSR methods. + deoptimize = false; + } HInstruction* compare = AddTypeGuard( receiver, cursor, bb_cursor, class_index, is_referrer, invoke_instruction, deoptimize); if (deoptimize) { @@ -623,7 +634,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction, ArtMethod* resolved_method, const InlineCache& ic) { // This optimization only works under JIT for now. - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); if (graph_->GetInstructionSet() == kMips64) { // TODO: Support HClassTableGet for mips64. 
return false; @@ -672,7 +683,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction, HInstruction* cursor = invoke_instruction->GetPrevious(); HBasicBlock* bb_cursor = invoke_instruction->GetBlock(); - if (!TryInlineAndReplace(invoke_instruction, actual_method, /* do_rtp */ false)) { + HInstruction* return_replacement = nullptr; + if (!TryBuildAndInline(invoke_instruction, actual_method, &return_replacement)) { return false; } @@ -701,9 +713,6 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction, } HNotEqual* compare = new (graph_->GetArena()) HNotEqual(class_table_get, constant); - HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize( - compare, invoke_instruction->GetDexPc()); - // TODO: Extend reference type propagation to understand the guard. if (cursor != nullptr) { bb_cursor->InsertInstructionAfter(receiver_class, cursor); } else { @@ -711,8 +720,20 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction, } bb_cursor->InsertInstructionAfter(class_table_get, receiver_class); bb_cursor->InsertInstructionAfter(compare, class_table_get); - bb_cursor->InsertInstructionAfter(deoptimize, compare); - deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment()); + + if (outermost_graph_->IsCompilingOsr()) { + CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction); + } else { + // TODO: Extend reference type propagation to understand the guard. + HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize( + compare, invoke_instruction->GetDexPc()); + bb_cursor->InsertInstructionAfter(deoptimize, compare); + deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment()); + if (return_replacement != nullptr) { + invoke_instruction->ReplaceWith(return_replacement); + } + invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction); + } // Run type propagation to get the guard typed. 
ReferenceTypePropagation rtp_fixup(graph_, @@ -744,6 +765,12 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction, HInstruction** return_replacement) { const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile(); + if (method->IsProxyMethod()) { + VLOG(compiler) << "Method " << PrettyMethod(method) + << " is not inlined because of unimplemented inline support for proxy methods."; + return false; + } + // Check whether we're allowed to inline. The outermost compilation unit is the relevant // dex file here (though the transitivity of an inline chain would allow checking the calller). if (!compiler_driver_->MayInline(method->GetDexFile(), @@ -802,7 +829,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction, if (!method->GetDeclaringClass()->IsVerified()) { uint16_t class_def_idx = method->GetDeclaringClass()->GetDexClassDefIndex(); - if (Runtime::Current()->UseJit() || + if (Runtime::Current()->UseJitCompilation() || !compiler_driver_->IsMethodVerifiedWithoutFailures( method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) { VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file) @@ -1265,6 +1292,8 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, size_t HInliner::RunOptimizations(HGraph* callee_graph, const DexFile::CodeItem* code_item, const DexCompilationUnit& dex_compilation_unit) { + // Note: if the outermost_graph_ is being compiled OSR, we should not run any + // optimization that could lead to a HDeoptimize. The following optimizations do not. 
HDeadCodeElimination dce(callee_graph, stats_); HConstantFolding fold(callee_graph); HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_); diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index a834216d0..aaddc01f1 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -721,6 +721,11 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in DCHECK(Runtime::Current()->IsAotCompiler()); return nullptr; } + if (!methods_class->IsAssignableFrom(compiling_class.Get())) { + // We cannot statically determine the target method. The runtime will throw a + // NoSuchMethodError on this one. + return nullptr; + } ArtMethod* actual_method; if (methods_class->IsInterface()) { actual_method = methods_class->FindVirtualMethodForInterfaceSuper( diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index d7b3856bf..fd79901ff 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -101,6 +101,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor { void SimplifyCompare(HInvoke* invoke, bool is_signum, Primitive::Type type); void SimplifyIsNaN(HInvoke* invoke); void SimplifyFP2Int(HInvoke* invoke); + void SimplifyStringIsEmptyOrLength(HInvoke* invoke); void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind); OptimizingCompilerStats* stats_; @@ -1673,6 +1674,27 @@ void InstructionSimplifierVisitor::SimplifyFP2Int(HInvoke* invoke) { invoke->ReplaceWithExceptInReplacementAtIndex(select, 0); // false at index 0 } +void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke) { + HInstruction* str = invoke->InputAt(0); + uint32_t dex_pc = invoke->GetDexPc(); + // We treat String as an array to allow DCE and BCE to seamlessly work on strings, + // so create the 
HArrayLength. + HArrayLength* length = new (GetGraph()->GetArena()) HArrayLength(str, dex_pc); + length->MarkAsStringLength(); + HInstruction* replacement; + if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) { + // For String.isEmpty(), create the `HEqual` representing the `length == 0`. + invoke->GetBlock()->InsertInstructionBefore(length, invoke); + HIntConstant* zero = GetGraph()->GetIntConstant(0); + HEqual* equal = new (GetGraph()->GetArena()) HEqual(length, zero, dex_pc); + replacement = equal; + } else { + DCHECK_EQ(invoke->GetIntrinsic(), Intrinsics::kStringLength); + replacement = length; + } + invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, replacement); +} + void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) { uint32_t dex_pc = invoke->GetDexPc(); HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc); @@ -1719,6 +1741,10 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) { case Intrinsics::kDoubleDoubleToLongBits: SimplifyFP2Int(instruction); break; + case Intrinsics::kStringIsEmpty: + case Intrinsics::kStringLength: + SimplifyStringIsEmptyOrLength(instruction); + break; case Intrinsics::kUnsafeLoadFence: SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny); break; diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index 5d4c4e295..418d59c6c 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -388,10 +388,8 @@ static Intrinsics GetIntrinsic(InlineMethod method) { case kIntrinsicGetCharsNoCheck: return Intrinsics::kStringGetCharsNoCheck; case kIntrinsicIsEmptyOrLength: - // The inliner can handle these two cases - and this is the preferred approach - // since after inlining the call is no longer visible (as opposed to waiting - // until codegen to handle intrinsic). - return Intrinsics::kNone; + return ((method.d.data & kIntrinsicFlagIsEmpty) == 0) ? 
+ Intrinsics::kStringLength : Intrinsics::kStringIsEmpty; case kIntrinsicIndexOf: return ((method.d.data & kIntrinsicFlagBase0) == 0) ? Intrinsics::kStringIndexOfAfter : Intrinsics::kStringIndexOf; diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index 863dd1c6f..214250f33 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -30,6 +30,10 @@ class DexFile; // Temporary measure until we have caught up with the Java 7 definition of Math.round. b/26327751 static constexpr bool kRoundIsPlusPointFive = false; +// Positive floating-point infinities. +static constexpr uint32_t kPositiveInfinityFloat = 0x7f800000U; +static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000); + // Recognize intrinsics from HInvoke nodes. class IntrinsicsRecognizer : public HOptimization { public: @@ -235,6 +239,8 @@ UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \ UNREACHABLE_INTRINSIC(Arch, LongCompare) \ UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \ UNREACHABLE_INTRINSIC(Arch, LongSignum) \ +UNREACHABLE_INTRINSIC(Arch, StringIsEmpty) \ +UNREACHABLE_INTRINSIC(Arch, StringLength) \ UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \ UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \ UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence) diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index 86b7bc138..de04175e3 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -1115,16 +1115,16 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke, ArenaAllocator* allocator, bool start_at_zero) { LocationSummary* locations = invoke->GetLocations(); - Register tmp_reg = locations->GetTemp(0).AsRegister(); // Note that the null check must have been done earlier. DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); // Check for code points > 0xFFFF. 
Either a slow-path check when we don't know statically, - // or directly dispatch if we have a constant. + // or directly dispatch for a large constant, or omit slow-path for a small constant or a char. SlowPathCode* slow_path = nullptr; - if (invoke->InputAt(1)->IsIntConstant()) { - if (static_cast(invoke->InputAt(1)->AsIntConstant()->GetValue()) > + HInstruction* code_point = invoke->InputAt(1); + if (code_point->IsIntConstant()) { + if (static_cast(code_point->AsIntConstant()->GetValue()) > std::numeric_limits::max()) { // Always needs the slow-path. We could directly dispatch to it, but this case should be // rare, so for simplicity just put the full slow-path down and branch unconditionally. @@ -1134,16 +1134,18 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else { + } else if (code_point->GetType() != Primitive::kPrimChar) { Register char_reg = locations->InAt(1).AsRegister(); - __ LoadImmediate(tmp_reg, std::numeric_limits::max()); - __ cmp(char_reg, ShifterOperand(tmp_reg)); + // 0xffff is not modified immediate but 0x10000 is, so use `>= 0x10000` instead of `> 0xffff`. + __ cmp(char_reg, + ShifterOperand(static_cast(std::numeric_limits::max()) + 1)); slow_path = new (allocator) IntrinsicSlowPathARM(invoke); codegen->AddSlowPath(slow_path); - __ b(slow_path->GetEntryLabel(), HI); + __ b(slow_path->GetEntryLabel(), HS); } if (start_at_zero) { + Register tmp_reg = locations->GetTemp(0).AsRegister(); DCHECK_EQ(tmp_reg, R2); // Start-index = 0. __ LoadImmediate(tmp_reg, 0); @@ -1170,7 +1172,7 @@ void IntrinsicLocationsBuilderARM::VisitStringIndexOf(HInvoke* invoke) { locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetOut(Location::RegisterLocation(R0)); - // Need a temp for slow-path codepoint compare, and need to send start-index=0. + // Need to send start-index=0. 
locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2))); } @@ -1190,9 +1192,6 @@ void IntrinsicLocationsBuilderARM::VisitStringIndexOfAfter(HInvoke* invoke) { locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); locations->SetOut(Location::RegisterLocation(R0)); - - // Need a temp for slow-path codepoint compare. - locations->AddTemp(Location::RequiresRegister()); } void IntrinsicCodeGeneratorARM::VisitStringIndexOfAfter(HInvoke* invoke) { @@ -1985,6 +1984,56 @@ void IntrinsicCodeGeneratorARM::VisitStringGetCharsNoCheck(HInvoke* invoke) { __ Bind(&done); } +void IntrinsicLocationsBuilderARM::VisitFloatIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM::VisitFloatIsInfinite(HInvoke* invoke) { + ArmAssembler* const assembler = GetAssembler(); + LocationSummary* const locations = invoke->GetLocations(); + const Register out = locations->Out().AsRegister(); + // Shifting left by 1 bit makes the value encodable as an immediate operand; + // we don't care about the sign bit anyway. + constexpr uint32_t infinity = kPositiveInfinityFloat << 1U; + + __ vmovrs(out, locations->InAt(0).AsFpuRegister()); + // We don't care about the sign bit, so shift left. + __ Lsl(out, out, 1); + __ eor(out, out, ShifterOperand(infinity)); + // If the result is 0, then it has 32 leading zeros, and less than that otherwise. + __ clz(out, out); + // Any number less than 32 logically shifted right by 5 bits results in 0; + // the same operation on 32 yields 1. 
+ __ Lsr(out, out, 5); +} + +void IntrinsicLocationsBuilderARM::VisitDoubleIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM::VisitDoubleIsInfinite(HInvoke* invoke) { + ArmAssembler* const assembler = GetAssembler(); + LocationSummary* const locations = invoke->GetLocations(); + const Register out = locations->Out().AsRegister(); + // The highest 32 bits of double precision positive infinity separated into + // two constants encodable as immediate operands. + constexpr uint32_t infinity_high = 0x7f000000U; + constexpr uint32_t infinity_high2 = 0x00f00000U; + + static_assert((infinity_high | infinity_high2) == static_cast(kPositiveInfinityDouble >> 32U), + "The constants do not add up to the high 32 bits of double precision positive infinity."); + __ vmovrrd(IP, out, FromLowSToD(locations->InAt(0).AsFpuRegisterPairLow())); + __ eor(out, out, ShifterOperand(infinity_high)); + __ eor(out, out, ShifterOperand(infinity_high2)); + // We don't care about the sign bit, so shift left. + __ orr(out, IP, ShifterOperand(out, LSL, 1)); + // If the result is 0, then it has 32 leading zeros, and less than that otherwise. + __ clz(out, out); + // Any number less than 32 logically shifted right by 5 bits results in 0; + // the same operation on 32 yields 1. + __ Lsr(out, out, 5); +} + UNIMPLEMENTED_INTRINSIC(ARM, IntegerBitCount) UNIMPLEMENTED_INTRINSIC(ARM, LongBitCount) UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble) @@ -2001,8 +2050,6 @@ UNIMPLEMENTED_INTRINSIC(ARM, MathRoundFloat) // Could be done by changing rou UNIMPLEMENTED_INTRINSIC(ARM, UnsafeCASLong) // High register pressure. 
UNIMPLEMENTED_INTRINSIC(ARM, SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ARM, ReferenceGetReferent) -UNIMPLEMENTED_INTRINSIC(ARM, FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(ARM, DoubleIsInfinite) UNIMPLEMENTED_INTRINSIC(ARM, IntegerHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit) diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 04ae3a673..6cd1726eb 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -47,6 +47,7 @@ using helpers::SRegisterFrom; using helpers::WRegisterFrom; using helpers::XRegisterFrom; using helpers::InputRegisterAt; +using helpers::OutputRegister; namespace { @@ -1173,31 +1174,118 @@ void IntrinsicCodeGeneratorARM64::VisitStringCharAt(HInvoke* invoke) { void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) { LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kCall, + invoke->InputAt(1)->CanBeNull() + ? 
LocationSummary::kCallOnSlowPath + : LocationSummary::kNoCall, kIntrinsified); - InvokeRuntimeCallingConvention calling_convention; - locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); - locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt)); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); } void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) { vixl::MacroAssembler* masm = GetVIXLAssembler(); LocationSummary* locations = invoke->GetLocations(); + Register str = XRegisterFrom(locations->InAt(0)); + Register arg = XRegisterFrom(locations->InAt(1)); + Register out = OutputRegister(invoke); + + Register temp0 = WRegisterFrom(locations->GetTemp(0)); + Register temp1 = WRegisterFrom(locations->GetTemp(1)); + Register temp2 = WRegisterFrom(locations->GetTemp(2)); + + vixl::Label loop; + vixl::Label find_char_diff; + vixl::Label end; + + // Get offsets of count and value fields within a string object. + const int32_t count_offset = mirror::String::CountOffset().Int32Value(); + const int32_t value_offset = mirror::String::ValueOffset().Int32Value(); + // Note that the null check must have been done earlier. DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); - Register argument = WRegisterFrom(locations->InAt(1)); - __ Cmp(argument, 0); - SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke); - codegen_->AddSlowPath(slow_path); - __ B(eq, slow_path->GetEntryLabel()); + // Take slow path and throw if input can be and is null. 
+ SlowPathCodeARM64* slow_path = nullptr; + const bool can_slow_path = invoke->InputAt(1)->CanBeNull(); + if (can_slow_path) { + slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke); + codegen_->AddSlowPath(slow_path); + __ Cbz(arg, slow_path->GetEntryLabel()); + } - __ Ldr( - lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pStringCompareTo).Int32Value())); - __ Blr(lr); - __ Bind(slow_path->GetExitLabel()); + // Reference equality check, return 0 if same reference. + __ Subs(out, str, arg); + __ B(&end, eq); + // Load lengths of this and argument strings. + __ Ldr(temp0, MemOperand(str.X(), count_offset)); + __ Ldr(temp1, MemOperand(arg.X(), count_offset)); + // Return zero if both strings are empty. + __ Orr(out, temp0, temp1); + __ Cbz(out, &end); + // out = length diff. + __ Subs(out, temp0, temp1); + // temp2 = min(len(str), len(arg)). + __ Csel(temp2, temp1, temp0, ge); + // Shorter string is empty? + __ Cbz(temp2, &end); + + // Store offset of string value in preparation for comparison loop. + __ Mov(temp1, value_offset); + + UseScratchRegisterScope scratch_scope(masm); + Register temp4 = scratch_scope.AcquireX(); + + // Assertions that must hold in order to compare strings 4 characters at a time. + DCHECK_ALIGNED(value_offset, 8); + static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded"); + + const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + DCHECK_EQ(char_size, 2u); + + // Promote temp0 to an X reg, ready for LDR. + temp0 = temp0.X(); + + // Loop to compare 4x16-bit characters at a time (ok because of string data alignment). + __ Bind(&loop); + __ Ldr(temp4, MemOperand(str.X(), temp1)); + __ Ldr(temp0, MemOperand(arg.X(), temp1)); + __ Cmp(temp4, temp0); + __ B(ne, &find_char_diff); + __ Add(temp1, temp1, char_size * 4); + __ Subs(temp2, temp2, 4); + __ B(gt, &loop); + __ B(&end); + + // Promote temp1 to an X reg, ready for EOR. 
+ temp1 = temp1.X(); + + // Find the single 16-bit character difference. + __ Bind(&find_char_diff); + // Get the bit position of the first character that differs. + __ Eor(temp1, temp0, temp4); + __ Rbit(temp1, temp1); + __ Clz(temp1, temp1); + __ Bic(temp1, temp1, 0xf); + // If the number of 16-bit chars remaining <= the index where the difference occurs (0-3), then + // the difference occurs outside the remaining string data, so just return length diff (out). + __ Cmp(temp2, Operand(temp1, LSR, 4)); + __ B(le, &end); + // Extract the characters and calculate the difference. + __ Lsr(temp0, temp0, temp1); + __ Lsr(temp4, temp4, temp1); + __ And(temp4, temp4, 0xffff); + __ Sub(out, temp4, Operand(temp0, UXTH)); + + __ Bind(&end); + + if (can_slow_path) { + __ Bind(slow_path->GetExitLabel()); + } } void IntrinsicLocationsBuilderARM64::VisitStringEquals(HInvoke* invoke) { @@ -1302,16 +1390,16 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke, ArenaAllocator* allocator, bool start_at_zero) { LocationSummary* locations = invoke->GetLocations(); - Register tmp_reg = WRegisterFrom(locations->GetTemp(0)); // Note that the null check must have been done earlier. DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically, - // or directly dispatch if we have a constant. + // or directly dispatch for a large constant, or omit slow-path for a small constant or a char. SlowPathCodeARM64* slow_path = nullptr; - if (invoke->InputAt(1)->IsIntConstant()) { - if (static_cast(invoke->InputAt(1)->AsIntConstant()->GetValue()) > 0xFFFFU) { + HInstruction* code_point = invoke->InputAt(1); + if (code_point->IsIntConstant()) { + if (static_cast(code_point->AsIntConstant()->GetValue()) > 0xFFFFU) { // Always needs the slow-path. We could directly dispatch to it, but this case should be // rare, so for simplicity just put the full slow-path down and branch unconditionally. 
slow_path = new (allocator) IntrinsicSlowPathARM64(invoke); @@ -1320,17 +1408,17 @@ static void GenerateVisitStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else { + } else if (code_point->GetType() != Primitive::kPrimChar) { Register char_reg = WRegisterFrom(locations->InAt(1)); - __ Mov(tmp_reg, 0xFFFF); - __ Cmp(char_reg, Operand(tmp_reg)); + __ Tst(char_reg, 0xFFFF0000); slow_path = new (allocator) IntrinsicSlowPathARM64(invoke); codegen->AddSlowPath(slow_path); - __ B(hi, slow_path->GetEntryLabel()); + __ B(ne, slow_path->GetEntryLabel()); } if (start_at_zero) { // Start-index = 0. + Register tmp_reg = WRegisterFrom(locations->GetTemp(0)); __ Mov(tmp_reg, 0); } @@ -1354,7 +1442,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) { locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt)); - // Need a temp for slow-path codepoint compare, and need to send start_index=0. + // Need to send start_index=0. locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(2))); } @@ -1374,9 +1462,6 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) { locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2))); locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt)); - - // Need a temp for slow-path codepoint compare. 
- locations->AddTemp(Location::RequiresRegister()); } void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) { @@ -2201,9 +2286,46 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +static void GenIsInfinite(LocationSummary* locations, + bool is64bit, + vixl::MacroAssembler* masm) { + Operand infinity; + Register out; + + if (is64bit) { + infinity = kPositiveInfinityDouble; + out = XRegisterFrom(locations->Out()); + } else { + infinity = kPositiveInfinityFloat; + out = WRegisterFrom(locations->Out()); + } + + const Register zero = vixl::Assembler::AppropriateZeroRegFor(out); + + MoveFPToInt(locations, is64bit, masm); + __ Eor(out, out, infinity); + // We don't care about the sign bit, so shift left. + __ Cmp(zero, Operand(out, LSL, 1)); + __ Cset(out, eq); +} + +void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) { + GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler()); +} + +void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) { + GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler()); +} + UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent) -UNIMPLEMENTED_INTRINSIC(ARM64, FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(ARM64, DoubleIsInfinite) UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit) diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h index dd9294d48..db60238fb 100644 --- a/compiler/optimizing/intrinsics_list.h +++ b/compiler/optimizing/intrinsics_list.h @@ -107,6 +107,8 @@ V(StringGetCharsNoCheck, 
kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \ V(StringIndexOf, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \ V(StringIndexOfAfter, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow) \ + V(StringIsEmpty, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow) \ + V(StringLength, kDirect, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow) \ V(StringNewStringFromBytes, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ V(StringNewStringFromChars, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ V(StringNewStringFromString, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc index 19c6a225a..fa250a306 100644 --- a/compiler/optimizing/intrinsics_mips.cc +++ b/compiler/optimizing/intrinsics_mips.cc @@ -2067,11 +2067,12 @@ static void GenerateStringIndexOf(HInvoke* invoke, // Note that the null check must have been done earlier. DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); - // Check for code points > 0xFFFF. Either a slow-path check when we - // don't know statically, or directly dispatch if we have a constant. + // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically, + // or directly dispatch for a large constant, or omit slow-path for a small constant or a char. SlowPathCodeMIPS* slow_path = nullptr; - if (invoke->InputAt(1)->IsIntConstant()) { - if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) { + HInstruction* code_point = invoke->InputAt(1); + if (code_point->IsIntConstant()) { + if (!IsUint<16>(code_point->AsIntConstant()->GetValue())) { // Always needs the slow-path. We could directly dispatch to it, // but this case should be rare, so for simplicity just put the // full slow-path down and branch unconditionally. 
@@ -2081,7 +2082,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else { + } else if (code_point->GetType() != Primitive::kPrimChar) { Register char_reg = locations->InAt(1).AsRegister(); // The "bltu" conditional branch tests to see if the character value // fits in a valid 16-bit (MIPS halfword) value. If it doesn't then @@ -2283,10 +2284,10 @@ static void GenIsInfinite(LocationSummary* locations, // If one, or more, of the exponent bits is zero, then the number can't be infinite. if (type == Primitive::kPrimDouble) { __ MoveFromFpuHigh(TMP, in); - __ LoadConst32(AT, 0x7FF00000); + __ LoadConst32(AT, High32Bits(kPositiveInfinityDouble)); } else { __ Mfc1(TMP, in); - __ LoadConst32(AT, 0x7F800000); + __ LoadConst32(AT, kPositiveInfinityFloat); } __ Xor(TMP, TMP, AT); diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index 1524e1e01..6c4e64e4b 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -1563,11 +1563,12 @@ static void GenerateStringIndexOf(HInvoke* invoke, // Note that the null check must have been done earlier. DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); - // Check for code points > 0xFFFF. Either a slow-path check when we - // don't know statically, or directly dispatch if we have a constant. + // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically, + // or directly dispatch for a large constant, or omit slow-path for a small constant or a char. SlowPathCodeMIPS64* slow_path = nullptr; - if (invoke->InputAt(1)->IsIntConstant()) { - if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) { + HInstruction* code_point = invoke->InputAt(1); + if (code_point->IsIntConstant()) { + if (!IsUint<16>(code_point->AsIntConstant()->GetValue())) { // Always needs the slow-path. 
We could directly dispatch to it, // but this case should be rare, so for simplicity just put the // full slow-path down and branch unconditionally. @@ -1577,7 +1578,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else { + } else if (code_point->GetType() != Primitive::kPrimChar) { GpuRegister char_reg = locations->InAt(1).AsRegister(); __ LoadConst32(tmp_reg, std::numeric_limits::max()); slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke); diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 4aab3e276..99bc40e71 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -1418,10 +1418,11 @@ static void GenerateStringIndexOf(HInvoke* invoke, DCHECK_EQ(out, EDI); // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically, - // or directly dispatch if we have a constant. + // or directly dispatch for a large constant, or omit slow-path for a small constant or a char. SlowPathCode* slow_path = nullptr; - if (invoke->InputAt(1)->IsIntConstant()) { - if (static_cast(invoke->InputAt(1)->AsIntConstant()->GetValue()) > + HInstruction* code_point = invoke->InputAt(1); + if (code_point->IsIntConstant()) { + if (static_cast(code_point->AsIntConstant()->GetValue()) > std::numeric_limits::max()) { // Always needs the slow-path. We could directly dispatch to it, but this case should be // rare, so for simplicity just put the full slow-path down and branch unconditionally. 
@@ -1431,7 +1432,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else { + } else if (code_point->GetType() != Primitive::kPrimChar) { __ cmpl(search_value, Immediate(std::numeric_limits::max())); slow_path = new (allocator) IntrinsicSlowPathX86(invoke); codegen->AddSlowPath(slow_path); diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 1d32dc7bc..06e9cc2b2 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -1517,10 +1517,11 @@ static void GenerateStringIndexOf(HInvoke* invoke, DCHECK_EQ(out.AsRegister(), RDI); // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically, - // or directly dispatch if we have a constant. + // or directly dispatch for a large constant, or omit slow-path for a small constant or a char. SlowPathCode* slow_path = nullptr; - if (invoke->InputAt(1)->IsIntConstant()) { - if (static_cast(invoke->InputAt(1)->AsIntConstant()->GetValue()) > + HInstruction* code_point = invoke->InputAt(1); + if (code_point->IsIntConstant()) { + if (static_cast(code_point->AsIntConstant()->GetValue()) > std::numeric_limits::max()) { // Always needs the slow-path. We could directly dispatch to it, but this case should be // rare, so for simplicity just put the full slow-path down and branch unconditionally. 
@@ -1530,7 +1531,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, __ Bind(slow_path->GetExitLabel()); return; } - } else { + } else if (code_point->GetType() != Primitive::kPrimChar) { __ cmpl(search_value, Immediate(std::numeric_limits::max())); slow_path = new (allocator) IntrinsicSlowPathX86_64(invoke); codegen->AddSlowPath(slow_path); diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc index 5a0b89c90..7543cd6c5 100644 --- a/compiler/optimizing/licm.cc +++ b/compiler/optimizing/licm.cc @@ -101,16 +101,6 @@ void LICM::Run() { SideEffects loop_effects = side_effects_.GetLoopEffects(block); HBasicBlock* pre_header = loop_info->GetPreHeader(); - bool contains_irreducible_loop = false; - if (graph_->HasIrreducibleLoops()) { - for (HBlocksInLoopIterator it_loop(*loop_info); !it_loop.Done(); it_loop.Advance()) { - if (it_loop.Current()->GetLoopInformation()->IsIrreducible()) { - contains_irreducible_loop = true; - break; - } - } - } - for (HBlocksInLoopIterator it_loop(*loop_info); !it_loop.Done(); it_loop.Advance()) { HBasicBlock* inner = it_loop.Current(); DCHECK(inner->IsInLoop()); @@ -123,11 +113,12 @@ void LICM::Run() { visited->SetBit(inner->GetBlockId()); } - if (contains_irreducible_loop) { + if (loop_info->ContainsIrreducibleLoop()) { // We cannot licm in an irreducible loop, or in a natural loop containing an // irreducible loop. continue; } + DCHECK(!loop_info->IsIrreducible()); // We can move an instruction that can throw only if it is the first // throwing instruction in the loop. 
Note that the first potentially diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc index 2de41580b..8a75a90cf 100644 --- a/compiler/optimizing/load_store_elimination.cc +++ b/compiler/optimizing/load_store_elimination.cc @@ -733,19 +733,14 @@ class LSEVisitor : public HGraphVisitor { if (Primitive::PrimitiveKind(heap_value->GetType()) != Primitive::PrimitiveKind(instruction->GetType())) { // The only situation where the same heap location has different type is when - // we do an array get from a null constant. In order to stay properly typed - // we do not merge the array gets. + // we do an array get on an instruction that originates from the null constant + // (the null could be behind a field access, an array access, a null check or + // a bound type). + // In order to stay properly typed on primitive types, we do not eliminate + // the array gets. if (kIsDebugBuild) { DCHECK(heap_value->IsArrayGet()) << heap_value->DebugName(); DCHECK(instruction->IsArrayGet()) << instruction->DebugName(); - HInstruction* array = instruction->AsArrayGet()->GetArray(); - DCHECK(array->IsNullCheck()) << array->DebugName(); - HInstruction* input = HuntForOriginalReference(array->InputAt(0)); - DCHECK(input->IsNullConstant()) << input->DebugName(); - array = heap_value->AsArrayGet()->GetArray(); - DCHECK(array->IsNullCheck()) << array->DebugName(); - input = HuntForOriginalReference(array->InputAt(0)); - DCHECK(input->IsNullConstant()) << input->DebugName(); } return; } diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index fe75451ad..60329ccff 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -56,9 +56,11 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) { // Nodes that we're currently visiting, indexed by block id. ArenaBitVector visiting(arena_, blocks_.size(), false, kArenaAllocGraphBuilder); // Number of successors visited from a given node, indexed by block id. 
- ArenaVector successors_visited(blocks_.size(), 0u, arena_->Adapter()); + ArenaVector successors_visited(blocks_.size(), + 0u, + arena_->Adapter(kArenaAllocGraphBuilder)); // Stack of nodes that we're currently visiting (same as marked in "visiting" above). - ArenaVector worklist(arena_->Adapter()); + ArenaVector worklist(arena_->Adapter(kArenaAllocGraphBuilder)); constexpr size_t kDefaultWorklistSize = 8; worklist.reserve(kDefaultWorklistSize); visited->SetBit(entry_block_->GetBlockId()); @@ -206,17 +208,35 @@ HInstruction* HBasicBlock::GetFirstInstructionDisregardMoves() const { return instruction; } +static bool UpdateDominatorOfSuccessor(HBasicBlock* block, HBasicBlock* successor) { + DCHECK(ContainsElement(block->GetSuccessors(), successor)); + + HBasicBlock* old_dominator = successor->GetDominator(); + HBasicBlock* new_dominator = + (old_dominator == nullptr) ? block + : CommonDominator::ForPair(old_dominator, block); + + if (old_dominator == new_dominator) { + return false; + } else { + successor->SetDominator(new_dominator); + return true; + } +} + void HGraph::ComputeDominanceInformation() { DCHECK(reverse_post_order_.empty()); reverse_post_order_.reserve(blocks_.size()); reverse_post_order_.push_back(entry_block_); // Number of visits of a given node, indexed by block id. - ArenaVector visits(blocks_.size(), 0u, arena_->Adapter()); + ArenaVector visits(blocks_.size(), 0u, arena_->Adapter(kArenaAllocGraphBuilder)); // Number of successors visited from a given node, indexed by block id. - ArenaVector successors_visited(blocks_.size(), 0u, arena_->Adapter()); + ArenaVector successors_visited(blocks_.size(), + 0u, + arena_->Adapter(kArenaAllocGraphBuilder)); // Nodes for which we need to visit successors. 
- ArenaVector worklist(arena_->Adapter()); + ArenaVector worklist(arena_->Adapter(kArenaAllocGraphBuilder)); constexpr size_t kDefaultWorklistSize = 8; worklist.reserve(kDefaultWorklistSize); worklist.push_back(entry_block_); @@ -228,15 +248,7 @@ void HGraph::ComputeDominanceInformation() { worklist.pop_back(); } else { HBasicBlock* successor = current->GetSuccessors()[successors_visited[current_id]++]; - - if (successor->GetDominator() == nullptr) { - successor->SetDominator(current); - } else { - // The CommonDominator can work for multiple blocks as long as the - // domination information doesn't change. However, since we're changing - // that information here, we can use the finder only for pairs of blocks. - successor->SetDominator(CommonDominator::ForPair(successor->GetDominator(), current)); - } + UpdateDominatorOfSuccessor(current, successor); // Once all the forward edges have been visited, we know the immediate // dominator of the block. We can then start visiting its successors. @@ -248,6 +260,44 @@ void HGraph::ComputeDominanceInformation() { } } + // Check if the graph has back edges not dominated by their respective headers. + // If so, we need to update the dominators of those headers and recursively of + // their successors. We do that with a fix-point iteration over all blocks. + // The algorithm is guaranteed to terminate because it loops only if the sum + // of all dominator chains has decreased in the current iteration. 
+ bool must_run_fix_point = false; + for (HBasicBlock* block : blocks_) { + if (block != nullptr && + block->IsLoopHeader() && + block->GetLoopInformation()->HasBackEdgeNotDominatedByHeader()) { + must_run_fix_point = true; + break; + } + } + if (must_run_fix_point) { + bool update_occurred = true; + while (update_occurred) { + update_occurred = false; + for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) { + HBasicBlock* block = it.Current(); + for (HBasicBlock* successor : block->GetSuccessors()) { + update_occurred |= UpdateDominatorOfSuccessor(block, successor); + } + } + } + } + + // Make sure that there are no remaining blocks whose dominator information + // needs to be updated. + if (kIsDebugBuild) { + for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) { + HBasicBlock* block = it.Current(); + for (HBasicBlock* successor : block->GetSuccessors()) { + DCHECK(!UpdateDominatorOfSuccessor(block, successor)); + } + } + } + // Populate `dominated_blocks_` information after computing all dominators. // The potential presence of irreducible loops requires to do it after. for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) { @@ -396,8 +446,10 @@ void HGraph::SimplifyCFG() { } GraphAnalysisResult HGraph::AnalyzeLoops() const { - // Order does not matter. - for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) { + // We iterate post order to ensure we visit inner loops before outer loops. + // `PopulateRecursive` needs this guarantee to know whether a natural loop + // contains an irreducible loop. 
+ for (HPostOrderIterator it(*this); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); if (block->IsLoopHeader()) { if (block->IsCatchBlock()) { @@ -530,6 +582,14 @@ void HLoopInformation::PopulateRecursive(HBasicBlock* block) { blocks_.SetBit(block->GetBlockId()); block->SetInLoop(this); + if (block->IsLoopHeader()) { + // We're visiting loops in post-order, so inner loops must have been + // populated already. + DCHECK(block->GetLoopInformation()->IsPopulated()); + if (block->GetLoopInformation()->IsIrreducible()) { + contains_irreducible_loop_ = true; + } + } for (HBasicBlock* predecessor : block->GetPredecessors()) { PopulateRecursive(predecessor); } @@ -595,20 +655,16 @@ void HLoopInformation::Populate() { blocks_.SetBit(header_->GetBlockId()); header_->SetInLoop(this); - bool is_irreducible_loop = false; - for (HBasicBlock* back_edge : GetBackEdges()) { - DCHECK(back_edge->GetDominator() != nullptr); - if (!header_->Dominates(back_edge)) { - is_irreducible_loop = true; - break; - } - } + bool is_irreducible_loop = HasBackEdgeNotDominatedByHeader(); if (is_irreducible_loop) { ArenaBitVector visited(graph->GetArena(), graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGraphBuilder); + // Stop marking blocks at the loop header. 
+ visited.SetBit(header_->GetBlockId()); + for (HBasicBlock* back_edge : GetBackEdges()) { PopulateIrreducibleRecursive(back_edge, &visited); } @@ -637,6 +693,7 @@ void HLoopInformation::Populate() { } if (is_irreducible_loop) { irreducible_ = true; + contains_irreducible_loop_ = true; graph->SetHasIrreducibleLoops(true); } } @@ -667,6 +724,16 @@ size_t HLoopInformation::GetLifetimeEnd() const { return last_position; } +bool HLoopInformation::HasBackEdgeNotDominatedByHeader() const { + for (HBasicBlock* back_edge : GetBackEdges()) { + DCHECK(back_edge->GetDominator() != nullptr); + if (!header_->Dominates(back_edge)) { + return true; + } + } + return false; +} + bool HBasicBlock::Dominates(HBasicBlock* other) const { // Walk up the dominator tree from `other`, to find out if `this` // is an ancestor. @@ -2218,6 +2285,7 @@ ReferenceTypeInfo ReferenceTypeInfo::Create(TypeHandle type_handle, bool is_exac ScopedObjectAccess soa(Thread::Current()); DCHECK(IsValidHandle(type_handle)); DCHECK(!type_handle->IsErroneous()); + DCHECK(!type_handle->IsArrayClass() || !type_handle->GetComponentType()->IsErroneous()); if (!is_exact) { DCHECK(!type_handle->CannotBeAssignedFromOtherTypes()) << "Callers of ReferenceTypeInfo::Create should ensure is_exact is properly computed"; @@ -2395,6 +2463,7 @@ void HLoadString::SetLoadKindInternal(LoadKind load_kind) { } if (!NeedsEnvironment()) { RemoveEnvironment(); + SetSideEffects(SideEffects::None()); } } diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index afb995d43..c08323a0c 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -650,6 +650,7 @@ class HLoopInformation : public ArenaObject { : header_(header), suspend_check_(nullptr), irreducible_(false), + contains_irreducible_loop_(false), back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)), // Make bit vector growable, as the number of blocks may change. 
blocks_(graph->GetArena(), graph->GetBlocks().size(), true, kArenaAllocLoopInfoBackEdges) { @@ -657,6 +658,7 @@ class HLoopInformation : public ArenaObject { } bool IsIrreducible() const { return irreducible_; } + bool ContainsIrreducibleLoop() const { return contains_irreducible_loop_; } void Dump(std::ostream& os); @@ -725,6 +727,12 @@ class HLoopInformation : public ArenaObject { blocks_.ClearAllBits(); } + bool HasBackEdgeNotDominatedByHeader() const; + + bool IsPopulated() const { + return blocks_.GetHighestBitSet() != -1; + } + private: // Internal recursive implementation of `Populate`. void PopulateRecursive(HBasicBlock* block); @@ -733,6 +741,7 @@ class HLoopInformation : public ArenaObject { HBasicBlock* header_; HSuspendCheck* suspend_check_; bool irreducible_; + bool contains_irreducible_loop_; ArenaVector back_edges_; ArenaBitVector blocks_; @@ -2281,7 +2290,7 @@ class HExpression : public HTemplateInstruction { // Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow // instruction that branches to the exit block. -class HReturnVoid : public HTemplateInstruction<0> { +class HReturnVoid FINAL : public HTemplateInstruction<0> { public: explicit HReturnVoid(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {} @@ -2296,7 +2305,7 @@ class HReturnVoid : public HTemplateInstruction<0> { // Represents dex's RETURN opcodes. A HReturn is a control flow // instruction that branches to the exit block. 
-class HReturn : public HTemplateInstruction<1> { +class HReturn FINAL : public HTemplateInstruction<1> { public: explicit HReturn(HInstruction* value, uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) { @@ -2311,7 +2320,7 @@ class HReturn : public HTemplateInstruction<1> { DISALLOW_COPY_AND_ASSIGN(HReturn); }; -class HPhi : public HInstruction { +class HPhi FINAL : public HInstruction { public: HPhi(ArenaAllocator* arena, uint32_t reg_number, @@ -2415,7 +2424,7 @@ class HPhi : public HInstruction { // The exit instruction is the only instruction of the exit block. // Instructions aborting the method (HThrow and HReturn) must branch to the // exit block. -class HExit : public HTemplateInstruction<0> { +class HExit FINAL : public HTemplateInstruction<0> { public: explicit HExit(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {} @@ -2428,7 +2437,7 @@ class HExit : public HTemplateInstruction<0> { }; // Jumps from one block to another. -class HGoto : public HTemplateInstruction<0> { +class HGoto FINAL : public HTemplateInstruction<0> { public: explicit HGoto(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {} @@ -2468,7 +2477,7 @@ class HConstant : public HExpression<0> { DISALLOW_COPY_AND_ASSIGN(HConstant); }; -class HNullConstant : public HConstant { +class HNullConstant FINAL : public HConstant { public: bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; @@ -2492,7 +2501,7 @@ class HNullConstant : public HConstant { // Constants of the type int. Those can be from Dex instructions, or // synthesized (for example with the if-eqz instruction). 
-class HIntConstant : public HConstant { +class HIntConstant FINAL : public HConstant { public: int32_t GetValue() const { return value_; } @@ -2533,7 +2542,7 @@ class HIntConstant : public HConstant { DISALLOW_COPY_AND_ASSIGN(HIntConstant); }; -class HLongConstant : public HConstant { +class HLongConstant FINAL : public HConstant { public: int64_t GetValue() const { return value_; } @@ -2563,7 +2572,7 @@ class HLongConstant : public HConstant { DISALLOW_COPY_AND_ASSIGN(HLongConstant); }; -class HFloatConstant : public HConstant { +class HFloatConstant FINAL : public HConstant { public: float GetValue() const { return value_; } @@ -2616,7 +2625,7 @@ class HFloatConstant : public HConstant { DISALLOW_COPY_AND_ASSIGN(HFloatConstant); }; -class HDoubleConstant : public HConstant { +class HDoubleConstant FINAL : public HConstant { public: double GetValue() const { return value_; } @@ -2669,7 +2678,7 @@ class HDoubleConstant : public HConstant { // Conditional branch. A block ending with an HIf instruction must have // two successors. -class HIf : public HTemplateInstruction<1> { +class HIf FINAL : public HTemplateInstruction<1> { public: explicit HIf(HInstruction* input, uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) { @@ -2698,7 +2707,7 @@ class HIf : public HTemplateInstruction<1> { // non-exceptional control flow. // Normal-flow successor is stored at index zero, exception handlers under // higher indices in no particular order. -class HTryBoundary : public HTemplateInstruction<0> { +class HTryBoundary FINAL : public HTemplateInstruction<0> { public: enum class BoundaryKind { kEntry, @@ -2756,7 +2765,7 @@ class HTryBoundary : public HTemplateInstruction<0> { }; // Deoptimize to interpreter, upon checking a condition. 
-class HDeoptimize : public HTemplateInstruction<1> { +class HDeoptimize FINAL : public HTemplateInstruction<1> { public: // We set CanTriggerGC to prevent any intermediate address to be live // at the point of the `HDeoptimize`. @@ -2781,7 +2790,7 @@ class HDeoptimize : public HTemplateInstruction<1> { // Represents the ArtMethod that was passed as a first argument to // the method. It is used by instructions that depend on it, like // instructions that work with the dex cache. -class HCurrentMethod : public HExpression<0> { +class HCurrentMethod FINAL : public HExpression<0> { public: explicit HCurrentMethod(Primitive::Type type, uint32_t dex_pc = kNoDexPc) : HExpression(type, SideEffects::None(), dex_pc) {} @@ -2794,7 +2803,7 @@ class HCurrentMethod : public HExpression<0> { // Fetches an ArtMethod from the virtual table or the interface method table // of a class. -class HClassTableGet : public HExpression<1> { +class HClassTableGet FINAL : public HExpression<1> { public: enum class TableKind { kVTable, @@ -2841,7 +2850,7 @@ class HClassTableGet : public HExpression<1> { // PackedSwitch (jump table). A block ending with a PackedSwitch instruction will // have one successor for each entry in the switch table, and the final successor // will be the block containing the next Dex opcode. -class HPackedSwitch : public HTemplateInstruction<1> { +class HPackedSwitch FINAL : public HTemplateInstruction<1> { public: HPackedSwitch(int32_t start_value, uint32_t num_entries, @@ -3086,7 +3095,7 @@ class HCondition : public HBinaryOperation { }; // Instruction to check if two inputs are equal to each other. 
-class HEqual : public HCondition { +class HEqual FINAL : public HCondition { public: HEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3130,7 +3139,7 @@ class HEqual : public HCondition { DISALLOW_COPY_AND_ASSIGN(HEqual); }; -class HNotEqual : public HCondition { +class HNotEqual FINAL : public HCondition { public: HNotEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3173,7 +3182,7 @@ class HNotEqual : public HCondition { DISALLOW_COPY_AND_ASSIGN(HNotEqual); }; -class HLessThan : public HCondition { +class HLessThan FINAL : public HCondition { public: HLessThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3210,7 +3219,7 @@ class HLessThan : public HCondition { DISALLOW_COPY_AND_ASSIGN(HLessThan); }; -class HLessThanOrEqual : public HCondition { +class HLessThanOrEqual FINAL : public HCondition { public: HLessThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3247,7 +3256,7 @@ class HLessThanOrEqual : public HCondition { DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual); }; -class HGreaterThan : public HCondition { +class HGreaterThan FINAL : public HCondition { public: HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3284,7 +3293,7 @@ class HGreaterThan : public HCondition { DISALLOW_COPY_AND_ASSIGN(HGreaterThan); }; -class HGreaterThanOrEqual : public HCondition { +class HGreaterThanOrEqual FINAL : public HCondition { public: HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3321,7 +3330,7 @@ class HGreaterThanOrEqual : public HCondition { DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual); }; -class HBelow : public HCondition { +class 
HBelow FINAL : public HCondition { public: HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3361,7 +3370,7 @@ class HBelow : public HCondition { DISALLOW_COPY_AND_ASSIGN(HBelow); }; -class HBelowOrEqual : public HCondition { +class HBelowOrEqual FINAL : public HCondition { public: HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3401,7 +3410,7 @@ class HBelowOrEqual : public HCondition { DISALLOW_COPY_AND_ASSIGN(HBelowOrEqual); }; -class HAbove : public HCondition { +class HAbove FINAL : public HCondition { public: HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3441,7 +3450,7 @@ class HAbove : public HCondition { DISALLOW_COPY_AND_ASSIGN(HAbove); }; -class HAboveOrEqual : public HCondition { +class HAboveOrEqual FINAL : public HCondition { public: HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc) : HCondition(first, second, dex_pc) {} @@ -3483,7 +3492,7 @@ class HAboveOrEqual : public HCondition { // Instruction to check how two inputs compare to each other. // Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1. 
-class HCompare : public HBinaryOperation { +class HCompare FINAL : public HBinaryOperation { public: // Note that `comparison_type` is the type of comparison performed // between the comparison's inputs, not the type of the instantiated @@ -3572,7 +3581,7 @@ class HCompare : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HCompare); }; -class HNewInstance : public HExpression<2> { +class HNewInstance FINAL : public HExpression<2> { public: HNewInstance(HInstruction* cls, HCurrentMethod* current_method, @@ -3775,7 +3784,7 @@ class HInvoke : public HInstruction { DISALLOW_COPY_AND_ASSIGN(HInvoke); }; -class HInvokeUnresolved : public HInvoke { +class HInvokeUnresolved FINAL : public HInvoke { public: HInvokeUnresolved(ArenaAllocator* arena, uint32_t number_of_arguments, @@ -3798,7 +3807,7 @@ class HInvokeUnresolved : public HInvoke { DISALLOW_COPY_AND_ASSIGN(HInvokeUnresolved); }; -class HInvokeStaticOrDirect : public HInvoke { +class HInvokeStaticOrDirect FINAL : public HInvoke { public: // Requirements of this method call regarding the class // initialization (clinit) check of its declaring class. 
@@ -4087,7 +4096,7 @@ class HInvokeStaticOrDirect : public HInvoke { std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs); std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs); -class HInvokeVirtual : public HInvoke { +class HInvokeVirtual FINAL : public HInvoke { public: HInvokeVirtual(ArenaAllocator* arena, uint32_t number_of_arguments, @@ -4113,7 +4122,7 @@ class HInvokeVirtual : public HInvoke { DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual); }; -class HInvokeInterface : public HInvoke { +class HInvokeInterface FINAL : public HInvoke { public: HInvokeInterface(ArenaAllocator* arena, uint32_t number_of_arguments, @@ -4140,7 +4149,7 @@ class HInvokeInterface : public HInvoke { DISALLOW_COPY_AND_ASSIGN(HInvokeInterface); }; -class HNeg : public HUnaryOperation { +class HNeg FINAL : public HUnaryOperation { public: HNeg(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) : HUnaryOperation(result_type, input, dex_pc) { @@ -4168,7 +4177,7 @@ class HNeg : public HUnaryOperation { DISALLOW_COPY_AND_ASSIGN(HNeg); }; -class HNewArray : public HExpression<2> { +class HNewArray FINAL : public HExpression<2> { public: HNewArray(HInstruction* length, HCurrentMethod* current_method, @@ -4207,7 +4216,7 @@ class HNewArray : public HExpression<2> { DISALLOW_COPY_AND_ASSIGN(HNewArray); }; -class HAdd : public HBinaryOperation { +class HAdd FINAL : public HBinaryOperation { public: HAdd(Primitive::Type result_type, HInstruction* left, @@ -4242,7 +4251,7 @@ class HAdd : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HAdd); }; -class HSub : public HBinaryOperation { +class HSub FINAL : public HBinaryOperation { public: HSub(Primitive::Type result_type, HInstruction* left, @@ -4275,7 +4284,7 @@ class HSub : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HSub); }; -class HMul : public HBinaryOperation { +class HMul FINAL : public HBinaryOperation { public: HMul(Primitive::Type 
result_type, HInstruction* left, @@ -4310,7 +4319,7 @@ class HMul : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HMul); }; -class HDiv : public HBinaryOperation { +class HDiv FINAL : public HBinaryOperation { public: HDiv(Primitive::Type result_type, HInstruction* left, @@ -4362,7 +4371,7 @@ class HDiv : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HDiv); }; -class HRem : public HBinaryOperation { +class HRem FINAL : public HBinaryOperation { public: HRem(Primitive::Type result_type, HInstruction* left, @@ -4413,7 +4422,7 @@ class HRem : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HRem); }; -class HDivZeroCheck : public HExpression<1> { +class HDivZeroCheck FINAL : public HExpression<1> { public: // `HDivZeroCheck` can trigger GC, as it may call the `ArithmeticException` // constructor. @@ -4439,7 +4448,7 @@ class HDivZeroCheck : public HExpression<1> { DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck); }; -class HShl : public HBinaryOperation { +class HShl FINAL : public HBinaryOperation { public: HShl(Primitive::Type result_type, HInstruction* value, @@ -4485,7 +4494,7 @@ class HShl : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HShl); }; -class HShr : public HBinaryOperation { +class HShr FINAL : public HBinaryOperation { public: HShr(Primitive::Type result_type, HInstruction* value, @@ -4531,7 +4540,7 @@ class HShr : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HShr); }; -class HUShr : public HBinaryOperation { +class HUShr FINAL : public HBinaryOperation { public: HUShr(Primitive::Type result_type, HInstruction* value, @@ -4579,7 +4588,7 @@ class HUShr : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HUShr); }; -class HAnd : public HBinaryOperation { +class HAnd FINAL : public HBinaryOperation { public: HAnd(Primitive::Type result_type, HInstruction* left, @@ -4616,7 +4625,7 @@ class HAnd : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HAnd); }; -class HOr : public HBinaryOperation { +class HOr FINAL : public HBinaryOperation { 
public: HOr(Primitive::Type result_type, HInstruction* left, @@ -4653,7 +4662,7 @@ class HOr : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HOr); }; -class HXor : public HBinaryOperation { +class HXor FINAL : public HBinaryOperation { public: HXor(Primitive::Type result_type, HInstruction* left, @@ -4690,7 +4699,7 @@ class HXor : public HBinaryOperation { DISALLOW_COPY_AND_ASSIGN(HXor); }; -class HRor : public HBinaryOperation { +class HRor FINAL : public HBinaryOperation { public: HRor(Primitive::Type result_type, HInstruction* value, HInstruction* distance) : HBinaryOperation(result_type, value, distance) { @@ -4743,7 +4752,7 @@ class HRor : public HBinaryOperation { // The value of a parameter in this method. Its location depends on // the calling convention. -class HParameterValue : public HExpression<0> { +class HParameterValue FINAL : public HExpression<0> { public: HParameterValue(const DexFile& dex_file, uint16_t type_index, @@ -4785,7 +4794,7 @@ class HParameterValue : public HExpression<0> { DISALLOW_COPY_AND_ASSIGN(HParameterValue); }; -class HNot : public HUnaryOperation { +class HNot FINAL : public HUnaryOperation { public: HNot(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc = kNoDexPc) : HUnaryOperation(result_type, input, dex_pc) {} @@ -4818,7 +4827,7 @@ class HNot : public HUnaryOperation { DISALLOW_COPY_AND_ASSIGN(HNot); }; -class HBooleanNot : public HUnaryOperation { +class HBooleanNot FINAL : public HUnaryOperation { public: explicit HBooleanNot(HInstruction* input, uint32_t dex_pc = kNoDexPc) : HUnaryOperation(Primitive::Type::kPrimBoolean, input, dex_pc) {} @@ -4855,7 +4864,7 @@ class HBooleanNot : public HUnaryOperation { DISALLOW_COPY_AND_ASSIGN(HBooleanNot); }; -class HTypeConversion : public HExpression<1> { +class HTypeConversion FINAL : public HExpression<1> { public: // Instantiate a type conversion of `input` to `result_type`. 
HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc) @@ -4898,7 +4907,7 @@ class HTypeConversion : public HExpression<1> { static constexpr uint32_t kNoRegNumber = -1; -class HNullCheck : public HExpression<1> { +class HNullCheck FINAL : public HExpression<1> { public: // `HNullCheck` can trigger GC, as it may call the `NullPointerException` // constructor. @@ -4960,7 +4969,7 @@ class FieldInfo : public ValueObject { const Handle dex_cache_; }; -class HInstanceFieldGet : public HExpression<1> { +class HInstanceFieldGet FINAL : public HExpression<1> { public: HInstanceFieldGet(HInstruction* value, Primitive::Type field_type, @@ -5012,7 +5021,7 @@ class HInstanceFieldGet : public HExpression<1> { DISALLOW_COPY_AND_ASSIGN(HInstanceFieldGet); }; -class HInstanceFieldSet : public HTemplateInstruction<2> { +class HInstanceFieldSet FINAL : public HTemplateInstruction<2> { public: HInstanceFieldSet(HInstruction* object, HInstruction* value, @@ -5063,7 +5072,7 @@ class HInstanceFieldSet : public HTemplateInstruction<2> { DISALLOW_COPY_AND_ASSIGN(HInstanceFieldSet); }; -class HArrayGet : public HExpression<2> { +class HArrayGet FINAL : public HExpression<2> { public: HArrayGet(HInstruction* array, HInstruction* index, Primitive::Type type, uint32_t dex_pc) : HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) { @@ -5109,7 +5118,7 @@ class HArrayGet : public HExpression<2> { DISALLOW_COPY_AND_ASSIGN(HArrayGet); }; -class HArraySet : public HTemplateInstruction<3> { +class HArraySet FINAL : public HTemplateInstruction<3> { public: HArraySet(HInstruction* array, HInstruction* index, @@ -5209,7 +5218,7 @@ class HArraySet : public HTemplateInstruction<3> { DISALLOW_COPY_AND_ASSIGN(HArraySet); }; -class HArrayLength : public HExpression<1> { +class HArrayLength FINAL : public HExpression<1> { public: HArrayLength(HInstruction* array, uint32_t dex_pc) : HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) { @@ -5226,13 +5235,26 @@ 
class HArrayLength : public HExpression<1> { return obj == InputAt(0); } + void MarkAsStringLength() { SetPackedFlag(); } + bool IsStringLength() const { return GetPackedFlag(); } + DECLARE_INSTRUCTION(ArrayLength); private: + // We treat a String as an array, creating the HArrayLength from String.length() + // or String.isEmpty() intrinsic in the instruction simplifier. We can always + // determine whether a particular HArrayLength is actually a String.length() by + // looking at the type of the input but that requires holding the mutator lock, so + // we prefer to use a flag, so that code generators don't need to do the locking. + static constexpr size_t kFlagIsStringLength = kNumberOfExpressionPackedBits; + static constexpr size_t kNumberOfArrayLengthPackedBits = kFlagIsStringLength + 1; + static_assert(kNumberOfArrayLengthPackedBits <= HInstruction::kMaxNumberOfPackedBits, + "Too many packed fields."); + DISALLOW_COPY_AND_ASSIGN(HArrayLength); }; -class HBoundsCheck : public HExpression<2> { +class HBoundsCheck FINAL : public HExpression<2> { public: // `HBoundsCheck` can trigger GC, as it may call the `IndexOutOfBoundsException` // constructor. @@ -5260,7 +5282,7 @@ class HBoundsCheck : public HExpression<2> { DISALLOW_COPY_AND_ASSIGN(HBoundsCheck); }; -class HSuspendCheck : public HTemplateInstruction<0> { +class HSuspendCheck FINAL : public HTemplateInstruction<0> { public: explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc), slow_path_(nullptr) {} @@ -5302,7 +5324,7 @@ class HNativeDebugInfo : public HTemplateInstruction<0> { /** * Instruction to load a Class object. 
*/ -class HLoadClass : public HExpression<1> { +class HLoadClass FINAL : public HExpression<1> { public: HLoadClass(HCurrentMethod* current_method, uint16_t type_index, @@ -5406,7 +5428,7 @@ class HLoadClass : public HExpression<1> { DISALLOW_COPY_AND_ASSIGN(HLoadClass); }; -class HLoadString : public HExpression<1> { +class HLoadString FINAL : public HExpression<1> { public: // Determines how to load the String. enum class LoadKind { @@ -5530,6 +5552,7 @@ class HLoadString : public HExpression<1> { SetPackedFlag(true); DCHECK(!NeedsEnvironment()); RemoveEnvironment(); + SetSideEffects(SideEffects::None()); } size_t InputCount() const OVERRIDE { @@ -5607,7 +5630,7 @@ inline void HLoadString::AddSpecialInput(HInstruction* special_input) { /** * Performs an initialization check on its Class object input. */ -class HClinitCheck : public HExpression<1> { +class HClinitCheck FINAL : public HExpression<1> { public: HClinitCheck(HLoadClass* constant, uint32_t dex_pc) : HExpression( @@ -5637,7 +5660,7 @@ class HClinitCheck : public HExpression<1> { DISALLOW_COPY_AND_ASSIGN(HClinitCheck); }; -class HStaticFieldGet : public HExpression<1> { +class HStaticFieldGet FINAL : public HExpression<1> { public: HStaticFieldGet(HInstruction* cls, Primitive::Type field_type, @@ -5686,7 +5709,7 @@ class HStaticFieldGet : public HExpression<1> { DISALLOW_COPY_AND_ASSIGN(HStaticFieldGet); }; -class HStaticFieldSet : public HTemplateInstruction<2> { +class HStaticFieldSet FINAL : public HTemplateInstruction<2> { public: HStaticFieldSet(HInstruction* cls, HInstruction* value, @@ -5734,7 +5757,7 @@ class HStaticFieldSet : public HTemplateInstruction<2> { DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet); }; -class HUnresolvedInstanceFieldGet : public HExpression<1> { +class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> { public: HUnresolvedInstanceFieldGet(HInstruction* obj, Primitive::Type field_type, @@ -5759,7 +5782,7 @@ class HUnresolvedInstanceFieldGet : public HExpression<1> { 
DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet); }; -class HUnresolvedInstanceFieldSet : public HTemplateInstruction<2> { +class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> { public: HUnresolvedInstanceFieldSet(HInstruction* obj, HInstruction* value, @@ -5797,7 +5820,7 @@ class HUnresolvedInstanceFieldSet : public HTemplateInstruction<2> { DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet); }; -class HUnresolvedStaticFieldGet : public HExpression<0> { +class HUnresolvedStaticFieldGet FINAL : public HExpression<0> { public: HUnresolvedStaticFieldGet(Primitive::Type field_type, uint32_t field_index, @@ -5820,7 +5843,7 @@ class HUnresolvedStaticFieldGet : public HExpression<0> { DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet); }; -class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> { +class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> { public: HUnresolvedStaticFieldSet(HInstruction* value, Primitive::Type field_type, @@ -5857,7 +5880,7 @@ class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> { }; // Implement the move-exception DEX instruction. -class HLoadException : public HExpression<0> { +class HLoadException FINAL : public HExpression<0> { public: explicit HLoadException(uint32_t dex_pc = kNoDexPc) : HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc) {} @@ -5872,7 +5895,7 @@ class HLoadException : public HExpression<0> { // Implicit part of move-exception which clears thread-local exception storage. // Must not be removed because the runtime expects the TLS to get cleared. 
-class HClearException : public HTemplateInstruction<0> { +class HClearException FINAL : public HTemplateInstruction<0> { public: explicit HClearException(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::AllWrites(), dex_pc) {} @@ -5883,7 +5906,7 @@ class HClearException : public HTemplateInstruction<0> { DISALLOW_COPY_AND_ASSIGN(HClearException); }; -class HThrow : public HTemplateInstruction<1> { +class HThrow FINAL : public HTemplateInstruction<1> { public: HThrow(HInstruction* exception, uint32_t dex_pc) : HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc) { @@ -5920,7 +5943,7 @@ enum class TypeCheckKind { std::ostream& operator<<(std::ostream& os, TypeCheckKind rhs); -class HInstanceOf : public HExpression<2> { +class HInstanceOf FINAL : public HExpression<2> { public: HInstanceOf(HInstruction* object, HLoadClass* constant, @@ -5974,7 +5997,7 @@ class HInstanceOf : public HExpression<2> { DISALLOW_COPY_AND_ASSIGN(HInstanceOf); }; -class HBoundType : public HExpression<1> { +class HBoundType FINAL : public HExpression<1> { public: HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc) : HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc), @@ -6018,7 +6041,7 @@ class HBoundType : public HExpression<1> { DISALLOW_COPY_AND_ASSIGN(HBoundType); }; -class HCheckCast : public HTemplateInstruction<2> { +class HCheckCast FINAL : public HTemplateInstruction<2> { public: HCheckCast(HInstruction* object, HLoadClass* constant, @@ -6063,7 +6086,7 @@ class HCheckCast : public HTemplateInstruction<2> { DISALLOW_COPY_AND_ASSIGN(HCheckCast); }; -class HMemoryBarrier : public HTemplateInstruction<0> { +class HMemoryBarrier FINAL : public HTemplateInstruction<0> { public: explicit HMemoryBarrier(MemBarrierKind barrier_kind, uint32_t dex_pc = kNoDexPc) : HTemplateInstruction( @@ -6088,7 +6111,7 @@ class HMemoryBarrier : public HTemplateInstruction<0> { DISALLOW_COPY_AND_ASSIGN(HMemoryBarrier); }; -class HMonitorOperation : public 
HTemplateInstruction<1> { +class HMonitorOperation FINAL : public HTemplateInstruction<1> { public: enum class OperationKind { kEnter, @@ -6133,7 +6156,7 @@ class HMonitorOperation : public HTemplateInstruction<1> { DISALLOW_COPY_AND_ASSIGN(HMonitorOperation); }; -class HSelect : public HExpression<3> { +class HSelect FINAL : public HExpression<3> { public: HSelect(HInstruction* condition, HInstruction* true_value, @@ -6246,7 +6269,7 @@ std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs); static constexpr size_t kDefaultNumberOfMoves = 4; -class HParallelMove : public HTemplateInstruction<0> { +class HParallelMove FINAL : public HTemplateInstruction<0> { public: explicit HParallelMove(ArenaAllocator* arena, uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc), diff --git a/compiler/optimizing/nodes_arm.h b/compiler/optimizing/nodes_arm.h index 6a1dbb9e7..371e8ef6b 100644 --- a/compiler/optimizing/nodes_arm.h +++ b/compiler/optimizing/nodes_arm.h @@ -19,7 +19,7 @@ namespace art { -class HArmDexCacheArraysBase : public HExpression<0> { +class HArmDexCacheArraysBase FINAL : public HExpression<0> { public: explicit HArmDexCacheArraysBase(const DexFile& dex_file) : HExpression(Primitive::kPrimInt, SideEffects::None(), kNoDexPc), diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h index 173852a55..737aece9c 100644 --- a/compiler/optimizing/nodes_arm64.h +++ b/compiler/optimizing/nodes_arm64.h @@ -21,7 +21,7 @@ namespace art { -class HArm64DataProcWithShifterOp : public HExpression<2> { +class HArm64DataProcWithShifterOp FINAL : public HExpression<2> { public: enum OpKind { kLSL, // Logical shift left. @@ -97,7 +97,7 @@ std::ostream& operator<<(std::ostream& os, const HArm64DataProcWithShifterOp::Op // This instruction computes an intermediate address pointing in the 'middle' of an object. 
The // result pointer cannot be handled by GC, so extra care is taken to make sure that this value is // never used across anything that can trigger GC. -class HArm64IntermediateAddress : public HExpression<2> { +class HArm64IntermediateAddress FINAL : public HExpression<2> { public: HArm64IntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc) : HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) { diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h index c10c718ff..bdcf54a6f 100644 --- a/compiler/optimizing/nodes_shared.h +++ b/compiler/optimizing/nodes_shared.h @@ -19,7 +19,7 @@ namespace art { -class HMultiplyAccumulate : public HExpression<3> { +class HMultiplyAccumulate FINAL : public HExpression<3> { public: HMultiplyAccumulate(Primitive::Type type, InstructionKind op, @@ -53,7 +53,7 @@ class HMultiplyAccumulate : public HExpression<3> { DISALLOW_COPY_AND_ASSIGN(HMultiplyAccumulate); }; -class HBitwiseNegatedRight : public HBinaryOperation { +class HBitwiseNegatedRight FINAL : public HBinaryOperation { public: HBitwiseNegatedRight(Primitive::Type result_type, InstructionKind op, diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h index 0b3a84d3d..c3696b593 100644 --- a/compiler/optimizing/nodes_x86.h +++ b/compiler/optimizing/nodes_x86.h @@ -20,7 +20,7 @@ namespace art { // Compute the address of the method for X86 Constant area support. -class HX86ComputeBaseMethodAddress : public HExpression<0> { +class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> { public: // Treat the value as an int32_t, but it is really a 32 bit native pointer. HX86ComputeBaseMethodAddress() @@ -33,7 +33,7 @@ class HX86ComputeBaseMethodAddress : public HExpression<0> { }; // Load a constant value from the constant table. 
-class HX86LoadFromConstantTable : public HExpression<2> { +class HX86LoadFromConstantTable FINAL : public HExpression<2> { public: HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base, HConstant* constant) @@ -57,7 +57,7 @@ class HX86LoadFromConstantTable : public HExpression<2> { }; // Version of HNeg with access to the constant table for FP types. -class HX86FPNeg : public HExpression<2> { +class HX86FPNeg FINAL : public HExpression<2> { public: HX86FPNeg(Primitive::Type result_type, HInstruction* input, @@ -76,7 +76,7 @@ class HX86FPNeg : public HExpression<2> { }; // X86 version of HPackedSwitch that holds a pointer to the base method address. -class HX86PackedSwitch : public HTemplateInstruction<2> { +class HX86PackedSwitch FINAL : public HTemplateInstruction<2> { public: HX86PackedSwitch(int32_t start_value, int32_t num_entries, diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 04c9ff9d6..f2394f605 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -46,6 +46,13 @@ static inline ReferenceTypeInfo::TypeHandle GetRootHandle(StackHandleScopeCollec return *cache; } +// Returns true if klass is admissible to the propagation: non-null and non-erroneous. +// For an array type, we also check if the component type is admissible. 
+static bool IsAdmissible(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) { + return klass != nullptr && !klass->IsErroneous() && + (!klass->IsArrayClass() || IsAdmissible(klass->GetComponentType())); +} + ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetObjectClassHandle() { return GetRootHandle(handles_, ClassLinker::kJavaLangObject, &object_class_handle_); } @@ -453,15 +460,10 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst } instr->SetReferenceTypeInfo( ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true)); - } else if (klass != nullptr) { - if (klass->IsErroneous()) { - // Set inexact object type for erroneous types. - instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); - } else { - ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass); - is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes(); - instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact)); - } + } else if (IsAdmissible(klass)) { + ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass); + is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes(); + instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact)); } else { instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); } @@ -563,7 +565,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) { instr->GetDexFile(), instr->GetTypeIndex(), hint_dex_cache_); - if (resolved_class != nullptr && !resolved_class->IsErroneous()) { + if (IsAdmissible(resolved_class)) { instr->SetLoadedClassRTI(ReferenceTypeInfo::Create( handle_cache_->NewHandle(resolved_class), /* is_exact */ true)); } @@ -664,12 +666,6 @@ void ReferenceTypePropagation::VisitPhi(HPhi* phi) { } if (phi->GetBlock()->IsLoopHeader()) { - if (!is_first_run_ && graph_->IsCompilingOsr()) { - // Don't update the type of a loop phi when 
compiling OSR: we may have done - // speculative optimizations dominating that phi, that do not hold at the - // point the interpreter jumps to that loop header. - return; - } // Set the initial type for the phi. Use the non back edge input for reaching // a fixed point faster. HInstruction* first_input = phi->InputAt(0); @@ -742,7 +738,7 @@ void ReferenceTypePropagation::UpdateArrayGet(HArrayGet* instr, HandleCache* han } Handle handle = parent_rti.GetTypeHandle(); - if (handle->IsObjectArrayClass() && !handle->GetComponentType()->IsErroneous()) { + if (handle->IsObjectArrayClass() && IsAdmissible(handle->GetComponentType())) { ReferenceTypeInfo::TypeHandle component_handle = handle_cache->NewHandle(handle->GetComponentType()); bool is_exact = component_handle->CannotBeAssignedFromOtherTypes(); diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index b1f9cbcdf..4405b803e 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -1773,7 +1773,9 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval, // therefore will not have a location for that instruction for `to`. // Because the instruction is a constant or the ArtMethod, we don't need to // do anything: it will be materialized in the irreducible loop. 
- DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by)); + DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by)) + << defined_by->DebugName() << ":" << defined_by->GetId() + << " " << from->GetBlockId() << " -> " << to->GetBlockId(); return; } diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index 7a1bb316e..08bd35f14 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -99,7 +99,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { if (direct_method != 0u) { // Should we use a direct pointer to the method? // Note: For JIT, kDirectAddressWithFixup doesn't make sense at all and while // kDirectAddress would be fine for image methods, we don't support it at the moment. - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); if (direct_method != static_cast(-1)) { // Is the method pointer known now? method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress; method_load_data = direct_method; @@ -109,7 +109,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { } else { // Use dex cache. DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile()); if (use_pc_relative_instructions) { // Can we use PC-relative access to the dex cache arrays? - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative; DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()), &graph_->GetDexFile()); @@ -121,7 +121,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { if (direct_code != 0u) { // Should we use a direct pointer to the code? 
// Note: For JIT, kCallPCRelative and kCallDirectWithFixup don't make sense at all and // while kCallDirect would be fine for image methods, we don't support it at the moment. - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); if (direct_code != static_cast(-1)) { // Is the code pointer known now? code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect; direct_code_ptr = direct_code; @@ -174,7 +174,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { if (compiler_driver_->IsBootImage()) { // Compiling boot image. Resolve the string and allocate it if needed. - DCHECK(!runtime->UseJit()); + DCHECK(!runtime->UseJitCompilation()); mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache); CHECK(string != nullptr); if (!compiler_driver_->GetSupportBootImageFixup()) { @@ -187,7 +187,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { ? HLoadString::LoadKind::kBootImageLinkTimePcRelative : HLoadString::LoadKind::kBootImageLinkTimeAddress; } - } else if (runtime->UseJit()) { + } else if (runtime->UseJitCompilation()) { // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus. 
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic()); mirror::String* string = dex_cache->GetResolvedString(string_index); diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index c2aa0c007..f96ca321c 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -233,7 +233,7 @@ bool SsaBuilder::UpdatePrimitiveType(HPhi* phi, ArenaVector* worklist) { } void SsaBuilder::RunPrimitiveTypePropagation() { - ArenaVector worklist(graph_->GetArena()->Adapter()); + ArenaVector worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder)); for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); @@ -319,7 +319,7 @@ bool SsaBuilder::FixAmbiguousArrayOps() { // uses (because they are untyped) and environment uses (if --debuggable). // After resolving all ambiguous ArrayGets, we will re-run primitive type // propagation on the Phis which need to be updated. - ArenaVector worklist(graph_->GetArena()->Adapter()); + ArenaVector worklist(graph_->GetArena()->Adapter(kArenaAllocGraphBuilder)); { ScopedObjectAccess soa(Thread::Current()); diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index 5534aeac2..36e0d993d 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -309,17 +309,8 @@ void SsaLivenessAnalysis::ComputeLiveRanges() { } if (block->IsLoopHeader()) { - if (kIsDebugBuild && block->GetLoopInformation()->IsIrreducible()) { - // To satisfy our liveness algorithm, we need to ensure loop headers of - // irreducible loops do not have any live-in instructions, except constants - // and the current method, which can be trivially re-materialized. 
- for (uint32_t idx : live_in->Indexes()) { - HInstruction* instruction = GetInstructionFromSsaIndex(idx); - DCHECK(instruction->GetBlock()->IsEntryBlock()) << instruction->DebugName(); - DCHECK(!instruction->IsParameterValue()); - DCHECK(instruction->IsCurrentMethod() || instruction->IsConstant()) - << instruction->DebugName(); - } + if (kIsDebugBuild) { + CheckNoLiveInIrreducibleLoop(*block); } size_t last_position = block->GetLoopInformation()->GetLifetimeEnd(); // For all live_in instructions at the loop header, we need to create a range @@ -344,6 +335,9 @@ void SsaLivenessAnalysis::ComputeLiveInAndLiveOutSets() { // change in this loop), and the live_out set. If the live_out // set does not change, there is no need to update the live_in set. if (UpdateLiveOut(block) && UpdateLiveIn(block)) { + if (kIsDebugBuild) { + CheckNoLiveInIrreducibleLoop(block); + } changed = true; } } diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index 40dab74a2..1fcba8bc7 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ b/compiler/optimizing/ssa_liveness_analysis.h @@ -1003,6 +1003,15 @@ class LiveInterval : public ArenaObject { void AddBackEdgeUses(const HBasicBlock& block_at_use) { DCHECK(block_at_use.IsInLoop()); + if (block_at_use.GetGraph()->HasIrreducibleLoops()) { + // Linear order may not be well formed when irreducible loops are present, + // i.e. loop blocks may not be adjacent and a back edge may not be last, + // which violates assumptions made in this method. + return; + } + + DCHECK(IsLinearOrderWellFormed(*block_at_use.GetGraph())); + // Add synthesized uses at the back edge of loops to help the register allocator. // Note that this method is called in decreasing liveness order, to faciliate adding // uses at the head of the `first_use_` linked list. 
Because below @@ -1027,30 +1036,12 @@ class LiveInterval : public ArenaObject { if ((first_use_ != nullptr) && (first_use_->GetPosition() <= back_edge_use_position)) { // There was a use already seen in this loop. Therefore the previous call to `AddUse` // already inserted the backedge use. We can stop going outward. - if (kIsDebugBuild) { - if (!HasSynthesizeUseAt(back_edge_use_position)) { - // There exists a use prior to `back_edge_use_position` but there is - // no synthesized use at the back edge. This can happen in the presence - // of irreducible loops, when blocks of the loop are not adjacent in - // linear order, i.e. when there is an out-of-loop block between - // `block_at_use` and `back_edge_position` that uses this interval. - DCHECK(block_at_use.GetGraph()->HasIrreducibleLoops()); - DCHECK(!IsLinearOrderWellFormed(*block_at_use.GetGraph())); - } - } + DCHECK(HasSynthesizeUseAt(back_edge_use_position)); break; } - if (last_in_new_list != nullptr && - back_edge_use_position <= last_in_new_list->GetPosition()) { - // Loops are not properly nested in the linear order, i.e. the back edge - // of an outer loop preceeds blocks of an inner loop. This can happen - // in the presence of irreducible loops. - DCHECK(block_at_use.GetGraph()->HasIrreducibleLoops()); - DCHECK(!IsLinearOrderWellFormed(*block_at_use.GetGraph())); - // We must bail out, otherwise we would generate an unsorted use list. 
- break; - } + DCHECK(last_in_new_list == nullptr || + back_edge_use_position > last_in_new_list->GetPosition()); UsePosition* new_use = new (allocator_) UsePosition( /* user */ nullptr, @@ -1269,6 +1260,23 @@ class SsaLivenessAnalysis : public ValueObject { return instruction->GetType() == Primitive::kPrimNot; } + void CheckNoLiveInIrreducibleLoop(const HBasicBlock& block) const { + if (!block.IsLoopHeader() || !block.GetLoopInformation()->IsIrreducible()) { + return; + } + BitVector* live_in = GetLiveInSet(block); + // To satisfy our liveness algorithm, we need to ensure loop headers of + // irreducible loops do not have any live-in instructions, except constants + // and the current method, which can be trivially re-materialized. + for (uint32_t idx : live_in->Indexes()) { + HInstruction* instruction = GetInstructionFromSsaIndex(idx); + DCHECK(instruction->GetBlock()->IsEntryBlock()) << instruction->DebugName(); + DCHECK(!instruction->IsParameterValue()); + DCHECK(instruction->IsCurrentMethod() || instruction->IsConstant()) + << instruction->DebugName(); + } + } + HGraph* const graph_; CodeGenerator* const codegen_; ArenaVector block_infos_; diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc index aeb31094d..c67612e65 100644 --- a/compiler/optimizing/ssa_phi_elimination.cc +++ b/compiler/optimizing/ssa_phi_elimination.cc @@ -17,6 +17,7 @@ #include "ssa_phi_elimination.h" #include "base/arena_containers.h" +#include "base/arena_bit_vector.h" #include "base/bit_vector-inl.h" namespace art { @@ -30,7 +31,7 @@ void SsaDeadPhiElimination::MarkDeadPhis() { // Phis are constructed live and should not be revived if previously marked // dead. This algorithm temporarily breaks that invariant but we DCHECK that // only phis which were initially live are revived. 
- ArenaSet initially_live(graph_->GetArena()->Adapter()); + ArenaSet initially_live(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination)); // Add to the worklist phis referenced by non-phi instructions. for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) { @@ -127,8 +128,11 @@ void SsaRedundantPhiElimination::Run() { } } - ArenaSet visited_phis_in_cycle(graph_->GetArena()->Adapter()); - ArenaVector cycle_worklist(graph_->GetArena()->Adapter()); + ArenaBitVector visited_phis_in_cycle(graph_->GetArena(), + graph_->GetCurrentInstructionId(), + /* expandable */ false, + kArenaAllocSsaPhiElimination); + ArenaVector cycle_worklist(graph_->GetArena()->Adapter(kArenaAllocSsaPhiElimination)); while (!worklist_.empty()) { HPhi* phi = worklist_.back(); @@ -139,17 +143,18 @@ void SsaRedundantPhiElimination::Run() { continue; } - if (phi->InputCount() == 0) { - DCHECK(phi->IsDead()); + // If the phi is dead, we know we won't revive it and it will be removed, + // so don't process it. 
+ if (phi->IsDead()) { continue; } HInstruction* candidate = nullptr; - visited_phis_in_cycle.clear(); + visited_phis_in_cycle.ClearAllBits(); cycle_worklist.clear(); cycle_worklist.push_back(phi); - visited_phis_in_cycle.insert(phi->GetId()); + visited_phis_in_cycle.SetBit(phi->GetId()); bool catch_phi_in_cycle = phi->IsCatchPhi(); bool irreducible_loop_phi_in_cycle = phi->IsIrreducibleLoopHeaderPhi(); @@ -181,9 +186,9 @@ void SsaRedundantPhiElimination::Run() { if (input == current) { continue; } else if (input->IsPhi()) { - if (!ContainsElement(visited_phis_in_cycle, input->GetId())) { + if (!visited_phis_in_cycle.IsBitSet(input->GetId())) { cycle_worklist.push_back(input->AsPhi()); - visited_phis_in_cycle.insert(input->GetId()); + visited_phis_in_cycle.SetBit(input->GetId()); catch_phi_in_cycle |= input->AsPhi()->IsCatchPhi(); irreducible_loop_phi_in_cycle |= input->IsIrreducibleLoopHeaderPhi(); } else { @@ -232,7 +237,7 @@ void SsaRedundantPhiElimination::Run() { // for elimination. Add phis that use this phi to the worklist. 
for (const HUseListNode& use : current->GetUses()) { HInstruction* user = use.GetUser(); - if (user->IsPhi() && !ContainsElement(visited_phis_in_cycle, user->GetId())) { + if (user->IsPhi() && !visited_phis_in_cycle.IsBitSet(user->GetId())) { worklist_.push_back(user->AsPhi()); } } diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 2203646e7..84cdb7d4d 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -1030,6 +1030,14 @@ void X86Assembler::xchgl(Register reg, const Address& address) { } +void X86Assembler::cmpb(const Address& address, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x80); + EmitOperand(7, address); + EmitUint8(imm.value() & 0xFF); +} + + void X86Assembler::cmpw(const Address& address, const Immediate& imm) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0x66); diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index 8567ad2a1..bc46e9f7c 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -479,6 +479,7 @@ class X86Assembler FINAL : public Assembler { void xchgl(Register dst, Register src); void xchgl(Register reg, const Address& address); + void cmpb(const Address& address, const Immediate& imm); void cmpw(const Address& address, const Immediate& imm); void cmpl(Register reg, const Immediate& imm); diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc index 1d1df6e44..28043c938 100644 --- a/compiler/utils/x86/assembler_x86_test.cc +++ b/compiler/utils/x86/assembler_x86_test.cc @@ -389,4 +389,10 @@ TEST_F(AssemblerX86Test, NearLabel) { DriverStr(expected, "near_label"); } +TEST_F(AssemblerX86Test, Cmpb) { + GetAssembler()->cmpb(x86::Address(x86::EDI, 128), x86::Immediate(0)); + const char* expected = "cmpb $0, 128(%EDI)\n"; + DriverStr(expected, "cmpb"); +} + } // namespace art diff --git 
a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index 32eb4a37b..5e7b587e4 100644 --- a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -1224,6 +1224,16 @@ void X86_64Assembler::xchgl(CpuRegister reg, const Address& address) { } +void X86_64Assembler::cmpb(const Address& address, const Immediate& imm) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + CHECK(imm.is_int32()); + EmitOptionalRex32(address); + EmitUint8(0x80); + EmitOperand(7, address); + EmitUint8(imm.value() & 0xFF); +} + + void X86_64Assembler::cmpw(const Address& address, const Immediate& imm) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); CHECK(imm.is_int32()); diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index 92c7d0ab9..720a402b5 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -506,6 +506,7 @@ class X86_64Assembler FINAL : public Assembler { void xchgq(CpuRegister dst, CpuRegister src); void xchgl(CpuRegister reg, const Address& address); + void cmpb(const Address& address, const Immediate& imm); void cmpw(const Address& address, const Immediate& imm); void cmpl(CpuRegister reg, const Immediate& imm); diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc index afe9207eb..9dccc9f21 100644 --- a/compiler/utils/x86_64/assembler_x86_64_test.cc +++ b/compiler/utils/x86_64/assembler_x86_64_test.cc @@ -1637,4 +1637,11 @@ TEST_F(AssemblerX86_64Test, Repecmpsq) { DriverStr(expected, "Repecmpsq"); } +TEST_F(AssemblerX86_64Test, Cmpb) { + GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128), + x86_64::Immediate(0)); + const char* expected = "cmpb $0, 128(%RDI)\n"; + DriverStr(expected, "cmpb"); +} + } // namespace art diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 370583e3b..cb274dcc0 100644 --- a/dex2oat/dex2oat.cc 
+++ b/dex2oat/dex2oat.cc @@ -1312,7 +1312,7 @@ class Dex2Oat FINAL { if (IsBootImage() && image_filenames_.size() > 1) { // If we're compiling the boot image, store the boot classpath into the Key-Value store. // We need this for the multi-image case. - key_value_store_->Put(OatHeader::kBootClassPath, GetMultiImageBootClassPath()); + key_value_store_->Put(OatHeader::kBootClassPathKey, GetMultiImageBootClassPath()); } if (!IsBootImage()) { @@ -1348,12 +1348,22 @@ class Dex2Oat FINAL { // Open dex files for class path. const std::vector class_path_locations = GetClassPathLocations(runtime_->GetClassPathString()); - OpenClassPathFiles(class_path_locations, &class_path_files_); + OpenClassPathFiles(class_path_locations, + &class_path_files_, + &opened_oat_files_, + runtime_->GetInstructionSet()); // Store the classpath we have right now. std::vector class_path_files = MakeNonOwningPointerVector(class_path_files_); - key_value_store_->Put(OatHeader::kClassPathKey, - OatFile::EncodeDexFileDependencies(class_path_files)); + std::string encoded_class_path; + if (class_path_locations.size() == 1 && + class_path_locations[0] == OatFile::kSpecialSharedLibrary) { + // When passing the special shared library as the classpath, it is the only path. + encoded_class_path = OatFile::kSpecialSharedLibrary; + } else { + encoded_class_path = OatFile::EncodeDexFileDependencies(class_path_files); + } + key_value_store_->Put(OatHeader::kClassPathKey, encoded_class_path); } // Now that we have finalized key_value_store_, start writing the oat file. @@ -1526,6 +1536,7 @@ class Dex2Oat FINAL { instruction_set_, instruction_set_features_.get(), IsBootImage(), + IsAppImage(), image_classes_.release(), compiled_classes_.release(), /* compiled_methods */ nullptr, @@ -1963,14 +1974,37 @@ class Dex2Oat FINAL { return parsed; } - // Opens requested class path files and appends them to opened_dex_files. + // Opens requested class path files and appends them to opened_dex_files. 
If the dex files have + // been stripped, this opens them from their oat files and appends them to opened_oat_files. static void OpenClassPathFiles(const std::vector& class_path_locations, - std::vector>* opened_dex_files) { - DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles out-param is nullptr"; + std::vector>* opened_dex_files, + std::vector>* opened_oat_files, + InstructionSet isa) { + DCHECK(opened_dex_files != nullptr) << "OpenClassPathFiles dex out-param is nullptr"; + DCHECK(opened_oat_files != nullptr) << "OpenClassPathFiles oat out-param is nullptr"; for (const std::string& location : class_path_locations) { + // Stop early if we detect the special shared library, which may be passed as the classpath + // for dex2oat when we want to skip the shared libraries check. + if (location == OatFile::kSpecialSharedLibrary) { + break; + } std::string error_msg; if (!DexFile::Open(location.c_str(), location.c_str(), &error_msg, opened_dex_files)) { - LOG(WARNING) << "Failed to open dex file '" << location << "': " << error_msg; + // If we fail to open the dex file because it's been stripped, try to open the dex file + // from its corresponding oat file. 
+ OatFileAssistant oat_file_assistant(location.c_str(), isa, false, false); + std::unique_ptr oat_file(oat_file_assistant.GetBestOatFile()); + if (oat_file == nullptr) { + LOG(WARNING) << "Failed to open dex file and associated oat file for '" << location + << "': " << error_msg; + } else { + std::vector> oat_dex_files = + oat_file_assistant.LoadDexFiles(*oat_file, location.c_str()); + opened_oat_files->push_back(std::move(oat_file)); + opened_dex_files->insert(opened_dex_files->end(), + std::make_move_iterator(oat_dex_files.begin()), + std::make_move_iterator(oat_dex_files.end())); + } } } } @@ -2440,6 +2474,7 @@ class Dex2Oat FINAL { std::unique_ptr driver_; std::vector> opened_dex_files_maps_; + std::vector> opened_oat_files_; std::vector> opened_dex_files_; std::vector no_inline_from_dex_files_; diff --git a/disassembler/disassembler.h b/disassembler/disassembler.h index b99e5c2df..b08031587 100644 --- a/disassembler/disassembler.h +++ b/disassembler/disassembler.h @@ -31,16 +31,23 @@ class DisassemblerOptions { // Should the disassembler print absolute or relative addresses. const bool absolute_addresses_; - // Base addess for calculating relative code offsets when absolute_addresses_ is false. + // Base address for calculating relative code offsets when absolute_addresses_ is false. const uint8_t* const base_address_; + // End address (exclusive); + const uint8_t* const end_address_; + // If set, the disassembler is allowed to look at load targets in literal // pools. 
const bool can_read_literals_; - DisassemblerOptions(bool absolute_addresses, const uint8_t* base_address, + DisassemblerOptions(bool absolute_addresses, + const uint8_t* base_address, + const uint8_t* end_address, bool can_read_literals) - : absolute_addresses_(absolute_addresses), base_address_(base_address), + : absolute_addresses_(absolute_addresses), + base_address_(base_address), + end_address_(end_address), can_read_literals_(can_read_literals) {} private: diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc index bcb043883..286faf215 100644 --- a/disassembler/disassembler_arm.cc +++ b/disassembler/disassembler_arm.cc @@ -418,7 +418,12 @@ std::ostream& operator<<(std::ostream& os, T2LitType type) { return os << static_cast(type); } -void DumpThumb2Literal(std::ostream& args, const uint8_t* instr_ptr, uint32_t U, uint32_t imm32, +void DumpThumb2Literal(std::ostream& args, + const uint8_t* instr_ptr, + const uintptr_t lo_adr, + const uintptr_t hi_adr, + uint32_t U, + uint32_t imm32, T2LitType type) { // Literal offsets (imm32) are not required to be aligned so we may need unaligned access. typedef const int16_t unaligned_int16_t __attribute__ ((aligned (1))); @@ -428,8 +433,16 @@ void DumpThumb2Literal(std::ostream& args, const uint8_t* instr_ptr, uint32_t U, typedef const int64_t unaligned_int64_t __attribute__ ((aligned (1))); typedef const uint64_t unaligned_uint64_t __attribute__ ((aligned (1))); + // Get address of literal. Bail if not within expected buffer range to + // avoid trying to fetch invalid literals (we can encounter this when + // interpreting raw data as instructions). uintptr_t pc = RoundDown(reinterpret_cast(instr_ptr) + 4, 4); uintptr_t lit_adr = U ? 
pc + imm32 : pc - imm32; + if (lit_adr < lo_adr || lit_adr >= hi_adr) { + args << " ; (?)"; + return; + } + args << " ; "; switch (type) { case kT2LitUByte: @@ -482,6 +495,10 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) return DumpThumb16(os, instr_ptr); } + // Set valid address range of backing buffer. + const uintptr_t lo_adr = reinterpret_cast(GetDisassemblerOptions()->base_address_); + const uintptr_t hi_adr = reinterpret_cast(GetDisassemblerOptions()->end_address_); + uint32_t op2 = (instr >> 20) & 0x7F; std::ostringstream opcode; std::ostringstream args; @@ -824,7 +841,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) args << d << ", [" << Rn << ", #" << ((U == 1) ? "" : "-") << (imm8 << 2) << "]"; if (Rn.r == 15 && U == 1) { - DumpThumb2Literal(args, instr_ptr, U, imm8 << 2, kT2LitHexLong); + DumpThumb2Literal(args, instr_ptr, lo_adr, hi_adr, U, imm8 << 2, kT2LitHexLong); } } else if (Rn.r == 13 && W == 1 && U == L) { // VPUSH/VPOP opcode << (L == 1 ? 
"vpop" : "vpush"); @@ -1410,7 +1427,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) }; DCHECK_LT(op2 >> 1, arraysize(lit_type)); DCHECK_NE(lit_type[op2 >> 1], kT2LitInvalid); - DumpThumb2Literal(args, instr_ptr, U, imm12, lit_type[op2 >> 1]); + DumpThumb2Literal(args, instr_ptr, lo_adr, hi_adr, U, imm12, lit_type[op2 >> 1]); } } else if ((instr & 0xFC0) == 0) { opcode << ldr_str << sign << type << ".w"; @@ -1711,10 +1728,13 @@ size_t DisassemblerArm::DumpThumb16(std::ostream& os, const uint8_t* instr_ptr) break; } } else if (opcode1 == 0x12 || opcode1 == 0x13) { // 01001x + const uintptr_t lo_adr = reinterpret_cast(GetDisassemblerOptions()->base_address_); + const uintptr_t hi_adr = reinterpret_cast(GetDisassemblerOptions()->end_address_); ThumbRegister Rt(instr, 8); uint16_t imm8 = instr & 0xFF; opcode << "ldr"; args << Rt << ", [pc, #" << (imm8 << 2) << "]"; + DumpThumb2Literal(args, instr_ptr, lo_adr, hi_adr, /*U*/ 1u, imm8 << 2, kT2LitHexWord); } else if ((opcode1 >= 0x14 && opcode1 <= 0x17) || // 0101xx (opcode1 >= 0x18 && opcode1 <= 0x1f) || // 011xxx (opcode1 >= 0x20 && opcode1 <= 0x27)) { // 100xxx diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc index 5f8871470..6a9afe574 100644 --- a/disassembler/disassembler_arm64.cc +++ b/disassembler/disassembler_arm64.cc @@ -63,9 +63,17 @@ void CustomDisassembler::VisitLoadLiteral(const vixl::Instruction* instr) { return; } + // Get address of literal. Bail if not within expected buffer range to + // avoid trying to fetch invalid literals (we can encounter this when + // interpreting raw data as instructions). void* data_address = instr->LiteralAddress(); - vixl::Instr op = instr->Mask(vixl::LoadLiteralMask); + if (data_address < base_address_ || data_address >= end_address_) { + AppendToOutput(" (?)"); + return; + } + // Output information on literal. 
+ vixl::Instr op = instr->Mask(vixl::LoadLiteralMask); switch (op) { case vixl::LDR_w_lit: case vixl::LDR_x_lit: diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h index 44fa53f9f..a4e5ee8a4 100644 --- a/disassembler/disassembler_arm64.h +++ b/disassembler/disassembler_arm64.h @@ -30,8 +30,11 @@ namespace arm64 { class CustomDisassembler FINAL : public vixl::Disassembler { public: - explicit CustomDisassembler(DisassemblerOptions* options) : - vixl::Disassembler(), read_literals_(options->can_read_literals_) { + explicit CustomDisassembler(DisassemblerOptions* options) + : vixl::Disassembler(), + read_literals_(options->can_read_literals_), + base_address_(options->base_address_), + end_address_(options->end_address_) { if (!options->absolute_addresses_) { MapCodeAddress(0, reinterpret_cast(options->base_address_)); } @@ -55,6 +58,10 @@ class CustomDisassembler FINAL : public vixl::Disassembler { // true | 0x72681558: 1c000acb ldr s11, pc+344 (addr 0x726816b0) // false | 0x72681558: 1c000acb ldr s11, pc+344 (addr 0x726816b0) (3.40282e+38) const bool read_literals_; + + // Valid address range: [base_address_, end_address_) + const void* const base_address_; + const void* const end_address_; }; class DisassemblerArm64 FINAL : public Disassembler { diff --git a/libart_fake/Android.mk b/libart_fake/Android.mk new file mode 100644 index 000000000..ed868a5bd --- /dev/null +++ b/libart_fake/Android.mk @@ -0,0 +1,34 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +LOCAL_PATH := $(my-dir) + +include $(CLEAR_VARS) + +LOCAL_MODULE := libart_fake +LOCAL_INSTALLED_MODULE_STEM := libart.so +LOCAL_SDK_VERSION := 9 +LOCAL_CPP_EXTENSION := .cc +LOCAL_SRC_FILES := fake.cc +LOCAL_SHARED_LIBRARIES := liblog + +ifdef TARGET_2ND_ARCH + LOCAL_MODULE_PATH_32 := $(TARGET_OUT)/fake-libs + LOCAL_MODULE_PATH_64 := $(TARGET_OUT)/fake-libs64 +else + LOCAL_MODULE_PATH := $(TARGET_OUT)/fake-libs +endif + +include $(BUILD_SHARED_LIBRARY) diff --git a/libart_fake/README.md b/libart_fake/README.md new file mode 100644 index 000000000..6e3621e55 --- /dev/null +++ b/libart_fake/README.md @@ -0,0 +1,5 @@ +libart_fake +==== + +A fake libart made to satisfy some misbehaving apps that will attempt to link +against libart.so. diff --git a/libart_fake/fake.cc b/libart_fake/fake.cc new file mode 100644 index 000000000..884242101 --- /dev/null +++ b/libart_fake/fake.cc @@ -0,0 +1,46 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "libart_fake" + +#include + +#define LOGIT(...) 
__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__) +namespace art { +class Dbg { + public: + void SuspendVM(); + void ResumeVM(); +}; + +class FaultManager { + public: + void EnsureArtActionInFrontOfSignalChain(); +}; + +void Dbg::SuspendVM() { + LOGIT("Linking to and calling into libart.so internal functions is not supported. " + "This call to '%s' is being ignored.", __func__); +} +void Dbg::ResumeVM() { + LOGIT("Linking to and calling into libart.so internal functions is not supported. " + "This call to '%s' is being ignored.", __func__); +} +void FaultManager::EnsureArtActionInFrontOfSignalChain() { + LOGIT("Linking to and calling into libart.so internal functions is not supported. " + "This call to '%s' is being ignored.", __func__); +} +}; // namespace art diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 3c6a05d97..f5458c067 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -59,6 +59,7 @@ #include "stack_map.h" #include "ScopedLocalRef.h" #include "thread_list.h" +#include "type_lookup_table.h" #include "verifier/method_verifier.h" #include "well_known_classes.h" @@ -334,6 +335,7 @@ class OatDumper { disassembler_(Disassembler::Create(instruction_set_, new DisassemblerOptions(options_.absolute_addresses_, oat_file.Begin(), + oat_file.End(), true /* can_read_literals_ */))) { CHECK(options_.class_loader_ != nullptr); CHECK(options_.class_filter_ != nullptr); @@ -573,8 +575,15 @@ class OatDumper { os << StringPrintf("location: %s\n", oat_dex_file.GetDexFileLocation().c_str()); os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum()); - // Create the verifier early. + // Print embedded dex file data range. 
+ const uint8_t* const oat_file_begin = oat_dex_file.GetOatFile()->Begin(); + const uint8_t* const dex_file_pointer = oat_dex_file.GetDexFilePointer(); + uint32_t dex_offset = dchecked_integral_cast(dex_file_pointer - oat_file_begin); + os << StringPrintf("dex-file: 0x%08x..0x%08x\n", + dex_offset, + dchecked_integral_cast(dex_offset + oat_dex_file.FileSize() - 1)); + // Create the dex file early. A lot of print-out things depend on it. std::string error_msg; const DexFile* const dex_file = OpenDexFile(&oat_dex_file, &error_msg); if (dex_file == nullptr) { @@ -583,6 +592,16 @@ class OatDumper { return false; } + // Print lookup table, if it exists. + if (oat_dex_file.GetLookupTableData() != nullptr) { + uint32_t table_offset = dchecked_integral_cast( + oat_dex_file.GetLookupTableData() - oat_file_begin); + uint32_t table_size = TypeLookupTable::RawDataLength(*dex_file); + os << StringPrintf("type-table: 0x%08x..0x%08x\n", + table_offset, + table_offset + table_size - 1); + } + VariableIndentationOutputStream vios(&os); ScopedIndentation indent1(&vios); for (size_t class_def_index = 0; @@ -1416,11 +1435,10 @@ class ImageDumper { indent_os << "\n"; // TODO: Dump fields. // Dump methods after. - const auto& methods_section = image_header_.GetMethodsSection(); DumpArtMethodVisitor visitor(this); - methods_section.VisitPackedArtMethods(&visitor, - image_space_.Begin(), - image_header_.GetPointerSize()); + image_header_.VisitPackedArtMethods(&visitor, + image_space_.Begin(), + image_header_.GetPointerSize()); // Dump the large objects separately. 
heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this); indent_os << "\n"; @@ -1779,6 +1797,7 @@ class ImageDumper { DCHECK(method != nullptr); const void* quick_oat_code_begin = GetQuickOatCodeBegin(method); const void* quick_oat_code_end = GetQuickOatCodeEnd(method); + const size_t pointer_size = image_header_.GetPointerSize(); OatQuickMethodHeader* method_header = reinterpret_cast( reinterpret_cast(quick_oat_code_begin) - sizeof(OatQuickMethodHeader)); if (method->IsNative()) { @@ -1792,13 +1811,16 @@ class ImageDumper { image_header_.GetPointerSize())) { indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code_begin); } - } else if (method->IsAbstract() || - method->IsCalleeSaveMethod() || - method->IsResolutionMethod() || - (method == Runtime::Current()->GetImtConflictMethod()) || - method->IsImtUnimplementedMethod() || - method->IsClassInitializer()) { + } else if (method->IsAbstract() || method->IsClassInitializer()) { // Don't print information for these. 
+ } else if (method->IsRuntimeMethod()) { + ImtConflictTable* table = method->GetImtConflictTable(image_header_.GetPointerSize()); + if (table != nullptr) { + indent_os << "IMT conflict table " << table << " method: "; + for (size_t i = 0, count = table->NumEntries(pointer_size); i < count; ++i) { + indent_os << PrettyMethod(table->GetImplementationMethod(i, pointer_size)) << " "; + } + } } else { const DexFile::CodeItem* code_item = method->GetCodeItem(); size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2; diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index 93e40afea..0a7ffda3b 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -472,8 +472,7 @@ class PatchOatArtFieldVisitor : public ArtFieldVisitor { void PatchOat::PatchArtFields(const ImageHeader* image_header) { PatchOatArtFieldVisitor visitor(this); - const auto& section = image_header->GetImageSection(ImageHeader::kSectionArtFields); - section.VisitPackedArtFields(&visitor, heap_->Begin()); + image_header->VisitPackedArtFields(&visitor, heap_->Begin()); } class PatchOatArtMethodVisitor : public ArtMethodVisitor { @@ -490,10 +489,20 @@ class PatchOatArtMethodVisitor : public ArtMethodVisitor { }; void PatchOat::PatchArtMethods(const ImageHeader* image_header) { - const auto& section = image_header->GetMethodsSection(); const size_t pointer_size = InstructionSetPointerSize(isa_); PatchOatArtMethodVisitor visitor(this); - section.VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size); + image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size); +} + +void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) { + const size_t pointer_size = InstructionSetPointerSize(isa_); + // We can safely walk target image since the conflict tables are independent. 
+ image_header->VisitPackedImtConflictTables( + [this](ArtMethod* method) { + return RelocatedAddressOfPointer(method); + }, + image_->Begin(), + pointer_size); } class FixupRootVisitor : public RootVisitor { @@ -627,6 +636,7 @@ bool PatchOat::PatchImage(bool primary_image) { PatchArtFields(image_header); PatchArtMethods(image_header); + PatchImtConflictTables(image_header); PatchInternedStrings(image_header); PatchClassTable(image_header); // Patch dex file int/long arrays which point to ArtFields. @@ -725,6 +735,7 @@ void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) { RelocatedAddressOfPointer(object->GetDexCacheResolvedTypes(pointer_size)), pointer_size); copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer( object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size); + // No special handling for IMT conflict table since all pointers are moved by the same offset. copy->SetEntryPointFromJniPtrSize(RelocatedAddressOfPointer( object->GetEntryPointFromJniPtrSize(pointer_size)), pointer_size); } diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h index 510ff1e5b..3ef837fde 100644 --- a/patchoat/patchoat.h +++ b/patchoat/patchoat.h @@ -117,6 +117,8 @@ class PatchOat { bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_); void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); + void PatchImtConflictTables(const ImageHeader* image_header) + SHARED_REQUIRES(Locks::mutator_lock_); void PatchInternedStrings(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); void PatchClassTable(const ImageHeader* image_header) diff --git a/profman/profman.cc b/profman/profman.cc index 3e632bce8..4d9276f15 100644 --- a/profman/profman.cc +++ b/profman/profman.cc @@ -189,7 +189,6 @@ class ProfMan FINAL { return -1; } std::string dump = info.DumpInfo(/*dex_files*/ 
nullptr); - info.Save(fd); std::cout << dump << "\n"; return 0; } @@ -216,7 +215,11 @@ class ProfMan FINAL { } void LogCompletionTime() { - LOG(INFO) << "profman took " << PrettyDuration(NanoTime() - start_ns_); + static constexpr uint64_t kLogThresholdTime = MsToNs(100); // 100ms + uint64_t time_taken = NanoTime() - start_ns_; + if (time_taken > kLogThresholdTime) { + LOG(WARNING) << "profman took " << PrettyDuration(time_taken); + } } std::vector profile_files_; diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index e358ff879..f0e9ac517 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -17,6 +17,7 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#include "entrypoints/quick/quick_default_init_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/math_entrypoints.h" @@ -47,67 +48,12 @@ extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8 extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - qpoints->pSet16Instance = 
art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetByteInstance = art_quick_get_byte_instance; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGetCharInstance = art_quick_get_char_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = art_quick_get_obj_instance; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetShortStatic = art_quick_get_short_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGet32Static = art_quick_get32_static; - qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - 
qpoints->pUnlockObject = art_quick_unlock_object; - // Math qpoints->pIdivmod = __aeabi_idivmod; qpoints->pLdiv = __aeabi_ldivmod; @@ -154,35 +100,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { qpoints->pStringCompareTo = art_quick_string_compareto; qpoints->pMemcpy = memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimization from compiled code. - qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // Read barrier. 
qpoints->pReadBarrierJni = ReadBarrierJni; qpoints->pReadBarrierMark = artReadBarrierMark; diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index e6ff0aa13..321b9d217 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -544,6 +544,15 @@ ENTRY art_quick_lock_object DELIVER_PENDING_EXCEPTION END art_quick_lock_object +ENTRY art_quick_lock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block + mov r1, r9 @ pass Thread::Current + bl artLockObjectFromCode @ (Object* obj, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_RESULT_IS_ZERO + DELIVER_PENDING_EXCEPTION +END art_quick_lock_object_no_inline + /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. * r0 holds the possibly null object to lock. @@ -601,6 +610,16 @@ ENTRY art_quick_unlock_object DELIVER_PENDING_EXCEPTION END art_quick_unlock_object +ENTRY art_quick_unlock_object_no_inline + @ save callee saves in case exception allocation triggers GC + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 + mov r1, r9 @ pass Thread::Current + bl artUnlockObjectFromCode @ (Object* obj, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_RESULT_IS_ZERO + DELIVER_PENDING_EXCEPTION +END art_quick_unlock_object_no_inline + /* * Entry from managed code that calls artIsAssignableFromCode and on failure calls * artThrowClassCastException. 
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc index 4db941174..bf0f6470d 100644 --- a/runtime/arch/arm64/entrypoints_init_arm64.cc +++ b/runtime/arch/arm64/entrypoints_init_arm64.cc @@ -17,6 +17,7 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#include "entrypoints/quick/quick_default_init_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/math_entrypoints.h" @@ -30,67 +31,12 @@ extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, const mirror::Class* ref_class); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - qpoints->pSet16Instance = art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetByteInstance = 
art_quick_get_byte_instance; - qpoints->pGetCharInstance = art_quick_get_char_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = art_quick_get_obj_instance; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGetShortStatic = art_quick_get_short_static; - qpoints->pGet32Static = art_quick_get32_static; - qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - qpoints->pUnlockObject = art_quick_unlock_object; - // Math // TODO null entrypoints not needed for ARM64 - generate inline. qpoints->pCmpgDouble = nullptr; @@ -134,38 +80,10 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Intrinsics qpoints->pIndexOf = art_quick_indexof; - qpoints->pStringCompareTo = art_quick_string_compareto; + // The ARM64 StringCompareTo intrinsic does not call the runtime. 
+ qpoints->pStringCompareTo = nullptr; qpoints->pMemcpy = memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimization from compiled code. - qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // Read barrier. qpoints->pReadBarrierJni = ReadBarrierJni; qpoints->pReadBarrierMark = artReadBarrierMark; diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc index 613bb5c76..cad13b29d 100644 --- a/runtime/arch/arm64/instruction_set_features_arm64.cc +++ b/runtime/arch/arm64/instruction_set_features_arm64.cc @@ -39,7 +39,7 @@ const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant( if (!needs_a53_835769_fix) { // Check to see if this is an expected variant. 
static const char* arm64_known_variants[] = { - "denver64", "kryo" + "denver64", "kryo", "exynos-m1" }; if (!FindVariantInArray(arm64_known_variants, arraysize(arm64_known_variants), variant)) { std::ostringstream os; diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 1cdda2d19..1fba09bae 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -1113,6 +1113,14 @@ ENTRY art_quick_lock_object RETURN_IF_W0_IS_ZERO_OR_DELIVER END art_quick_lock_object +ENTRY art_quick_lock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case we block + mov x1, xSELF // pass Thread::Current + bl artLockObjectFromCode // (Object* obj, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_W0_IS_ZERO_OR_DELIVER +END art_quick_lock_object_no_inline + /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. * x0 holds the possibly null object to lock. @@ -1171,6 +1179,14 @@ ENTRY art_quick_unlock_object RETURN_IF_W0_IS_ZERO_OR_DELIVER END art_quick_unlock_object +ENTRY art_quick_unlock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case exception allocation triggers GC + mov x1, xSELF // pass Thread::Current + bl artUnlockObjectFromCode // (Object* obj, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_W0_IS_ZERO_OR_DELIVER +END art_quick_unlock_object_no_inline + /* * Entry from managed code that calls artIsAssignableFromCode and on failure calls * artThrowClassCastException. @@ -2187,108 +2203,3 @@ ENTRY art_quick_indexof asr x0, x0, #1 ret END art_quick_indexof - - /* - * String's compareTo. - * - * TODO: Not very optimized. - * - * On entry: - * x0: this object pointer - * x1: comp object pointer - * - */ - .extern __memcmp16 -ENTRY art_quick_string_compareto - mov x2, x0 // x0 is return, use x2 for first input. - sub x0, x2, x1 // Same string object? 
- cbnz x0,1f - ret -1: // Different string objects. - - ldr w4, [x2, #MIRROR_STRING_COUNT_OFFSET] - ldr w3, [x1, #MIRROR_STRING_COUNT_OFFSET] - add x2, x2, #MIRROR_STRING_VALUE_OFFSET - add x1, x1, #MIRROR_STRING_VALUE_OFFSET - - /* - * Now: Data* Count - * first arg x2 w4 - * second arg x1 w3 - */ - - // x0 := str1.length(w4) - str2.length(w3). ldr zero-extended w3/w4 into x3/x4. - subs x0, x4, x3 - // Min(count1, count2) into w3. - csel x3, x3, x4, ge - - // TODO: Tune this value. - // Check for long string, do memcmp16 for them. - cmp w3, #28 // Constant from arm32. - bgt .Ldo_memcmp16 - - /* - * Now: - * x2: *first string data - * x1: *second string data - * w3: iteration count - * x0: return value if comparison equal - * x4, x5, x6, x7: free - */ - - // Do a simple unrolled loop. -.Lloop: - // At least two more elements? - subs w3, w3, #2 - b.lt .Lremainder_or_done - - ldrh w4, [x2], #2 - ldrh w5, [x1], #2 - - ldrh w6, [x2], #2 - ldrh w7, [x1], #2 - - subs w4, w4, w5 - b.ne .Lw4_result - - subs w6, w6, w7 - b.ne .Lw6_result - - b .Lloop - -.Lremainder_or_done: - adds w3, w3, #1 - b.eq .Lremainder - ret - -.Lremainder: - ldrh w4, [x2], #2 - ldrh w5, [x1], #2 - subs w4, w4, w5 - b.ne .Lw4_result - ret - -// Result is in w4 -.Lw4_result: - sxtw x0, w4 - ret - -// Result is in w6 -.Lw6_result: - sxtw x0, w6 - ret - -.Ldo_memcmp16: - mov x14, x0 // Save x0 and LR. __memcmp16 does not use these temps. - mov x15, xLR // TODO: Codify and check that? - - mov x0, x2 - uxtw x2, w3 - bl __memcmp16 - - mov xLR, x15 // Restore LR. - - cmp x0, #0 // Check the memcmp difference. - csel x0, x0, x14, ne // x0 := x0 != 0 ? x14(prev x0=length diff) : x1. 
- ret -END art_quick_string_compareto diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc index 51eb77f40..45e33a850 100644 --- a/runtime/arch/mips/entrypoints_init_mips.cc +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -59,6 +59,9 @@ extern "C" int64_t __divdi3(int64_t, int64_t); extern "C" int64_t __moddi3(int64_t, int64_t); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { + // Note: MIPS has asserts checking for the type of entrypoint. Don't move it + // to InitDefaultEntryPoints(). + // JNI jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; @@ -167,9 +170,14 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { "Non-direct C stub marked direct."); // Locks - qpoints->pLockObject = art_quick_lock_object; + if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) { + qpoints->pLockObject = art_quick_lock_object_no_inline; + qpoints->pUnlockObject = art_quick_unlock_object_no_inline; + } else { + qpoints->pLockObject = art_quick_lock_object; + qpoints->pUnlockObject = art_quick_unlock_object; + } static_assert(!IsDirectEntrypoint(kQuickLockObject), "Non-direct C stub marked direct."); - qpoints->pUnlockObject = art_quick_unlock_object; static_assert(!IsDirectEntrypoint(kQuickUnlockObject), "Non-direct C stub marked direct."); // Math diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 8939a488e..3ee26afc4 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -906,6 +906,16 @@ ENTRY art_quick_lock_object RETURN_IF_ZERO END art_quick_lock_object +ENTRY art_quick_lock_object_no_inline + beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set + nop + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block + la $t9, artLockObjectFromCode + jalr $t9 # (Object* obj, Thread*) + move $a1, rSELF # pass Thread::Current + RETURN_IF_ZERO +END 
art_quick_lock_object_no_inline + /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. */ @@ -920,6 +930,16 @@ ENTRY art_quick_unlock_object RETURN_IF_ZERO END art_quick_unlock_object +ENTRY art_quick_unlock_object_no_inline + beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set + nop + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC + la $t9, artUnlockObjectFromCode + jalr $t9 # (Object* obj, Thread*) + move $a1, rSELF # pass Thread::Current + RETURN_IF_ZERO +END art_quick_unlock_object_no_inline + /* * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. */ diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc index 4bdb38e51..030c12707 100644 --- a/runtime/arch/mips64/entrypoints_init_mips64.cc +++ b/runtime/arch/mips64/entrypoints_init_mips64.cc @@ -18,6 +18,7 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#include "entrypoints/quick/quick_default_init_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/math_entrypoints.h" @@ -57,67 +58,12 @@ extern "C" int64_t __divdi3(int64_t, int64_t); extern "C" int64_t __moddi3(int64_t, int64_t); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = 
art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - qpoints->pSet16Instance = art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetByteInstance = art_quick_get_byte_instance; - qpoints->pGetCharInstance = art_quick_get_char_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = art_quick_get_obj_instance; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGetShortStatic = art_quick_get_short_static; - qpoints->pGet32Static = art_quick_get32_static; - qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - 
qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - qpoints->pUnlockObject = art_quick_unlock_object; - // Math qpoints->pCmpgDouble = CmpgDouble; qpoints->pCmpgFloat = CmpgFloat; @@ -144,35 +90,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { qpoints->pStringCompareTo = art_quick_string_compareto; qpoints->pMemcpy = memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimization from compiled code. 
- qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // TODO - use lld/scd instructions for Mips64 // Atomic 64-bit load/store qpoints->pA64Load = QuasiAtomic::Read64; diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index 5d0c94c63..8f1a35a69 100644 --- a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -971,6 +971,15 @@ ENTRY art_quick_lock_object RETURN_IF_ZERO END art_quick_lock_object +ENTRY art_quick_lock_object_no_inline + beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set + nop + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block + jal artLockObjectFromCode # (Object* obj, Thread*) + move $a1, rSELF # pass Thread::Current + RETURN_IF_ZERO +END art_quick_lock_object_no_inline + /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. */ @@ -984,6 +993,15 @@ ENTRY art_quick_unlock_object RETURN_IF_ZERO END art_quick_unlock_object +ENTRY art_quick_unlock_object_no_inline + beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set + nop + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC + jal artUnlockObjectFromCode # (Object* obj, Thread*) + move $a1, rSELF # pass Thread::Current + RETURN_IF_ZERO +END art_quick_unlock_object_no_inline + /* * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. */ diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index 75d9073cf..02629e819 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -1205,7 +1205,8 @@ TEST_F(StubTest, AllocObjectArray) { TEST_F(StubTest, StringCompareTo) { -#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \ + // There is no StringCompareTo runtime entrypoint for __aarch64__. 
+#if defined(__i386__) || defined(__arm__) || \ defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)) // TODO: Check the "Unresolved" allocation stubs @@ -2010,14 +2011,14 @@ TEST_F(StubTest, DISABLED_IMT) { // that will create it: the runtime stub expects to be called by compiled code. LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc(); ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc); - static ImtConflictTable::Entry empty_entry = { nullptr, nullptr }; - ImtConflictTable* empty_conflict_table = reinterpret_cast(&empty_entry); + ImtConflictTable* empty_conflict_table = + Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc); void* data = linear_alloc->Alloc( self, - ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table)); + ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, sizeof(void*))); ImtConflictTable* new_table = new (data) ImtConflictTable( - empty_conflict_table, inf_contains, contains_amethod); - conflict_method->SetImtConflictTable(new_table); + empty_conflict_table, inf_contains, contains_amethod, sizeof(void*)); + conflict_method->SetImtConflictTable(new_table, sizeof(void*)); size_t result = Invoke3WithReferrerAndHidden(reinterpret_cast(conflict_method), diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc index e593f39fd..15a857146 100644 --- a/runtime/arch/x86/entrypoints_init_x86.cc +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -17,6 +17,7 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#include "entrypoints/quick/quick_default_init_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "interpreter/interpreter.h" @@ -33,67 +34,12 @@ extern "C" mirror::Object* 
art_quick_read_barrier_slow(mirror::Object*, mirror:: extern "C" mirror::Object* art_quick_read_barrier_for_root_slow(GcRoot*); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = art_quick_is_assignable; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - qpoints->pSet16Instance = art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetByteInstance = art_quick_get_byte_instance; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGetCharInstance = art_quick_get_char_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = art_quick_get_obj_instance; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetShortStatic = art_quick_get_short_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGet32Static = art_quick_get32_static; - 
qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - qpoints->pUnlockObject = art_quick_unlock_object; - // More math. qpoints->pCos = cos; qpoints->pSin = sin; @@ -128,35 +74,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { qpoints->pStringCompareTo = art_quick_string_compareto; qpoints->pMemcpy = art_quick_memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - 
// Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimize - qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // Read barrier. qpoints->pReadBarrierJni = ReadBarrierJni; qpoints->pReadBarrierMark = art_quick_read_barrier_mark; diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 4f9b3f787..485da9fe3 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -1075,6 +1075,22 @@ DEFINE_FUNCTION art_quick_lock_object RETURN_IF_EAX_ZERO END_FUNCTION art_quick_lock_object +DEFINE_FUNCTION art_quick_lock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC + // Outgoing argument set up + subl LITERAL(8), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(8) + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + PUSH eax // pass object + call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*) + addl LITERAL(16), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-16) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO +END_FUNCTION art_quick_lock_object_no_inline + + DEFINE_FUNCTION art_quick_unlock_object testl %eax, %eax // null check object/eax jz .Lslow_unlock @@ -1130,6 +1146,21 @@ DEFINE_FUNCTION art_quick_unlock_object RETURN_IF_EAX_ZERO END_FUNCTION art_quick_unlock_object +DEFINE_FUNCTION art_quick_unlock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC + // 
Outgoing argument set up + subl LITERAL(8), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(8) + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + PUSH eax // pass object + call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*) + addl LITERAL(16), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-16) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO +END_FUNCTION art_quick_unlock_object_no_inline + DEFINE_FUNCTION art_quick_is_assignable PUSH eax // alignment padding PUSH ecx // pass arg2 - obj->klass diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc index 0a5d14a16..bd6df700d 100644 --- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc +++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc @@ -17,6 +17,9 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#if !defined(__APPLE__) +#include "entrypoints/quick/quick_default_init_entrypoints.h" +#endif #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/math_entrypoints.h" #include "entrypoints/runtime_asm_entrypoints.h" @@ -38,67 +41,12 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { UNUSED(jpoints, qpoints); UNIMPLEMENTED(FATAL); #else - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = art_quick_assignable_from_code; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - 
qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - qpoints->pSet16Instance = art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetByteInstance = art_quick_get_byte_instance; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGetCharInstance = art_quick_get_char_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = art_quick_get_obj_instance; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetShortStatic = art_quick_get_short_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGet32Static = art_quick_get32_static; - qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - 
qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - qpoints->pUnlockObject = art_quick_unlock_object; - // More math. qpoints->pCos = cos; qpoints->pSin = sin; @@ -132,35 +80,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { qpoints->pStringCompareTo = art_quick_string_compareto; qpoints->pMemcpy = art_quick_memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimize - qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // Read barrier. 
qpoints->pReadBarrierJni = ReadBarrierJni; qpoints->pReadBarrierMark = art_quick_read_barrier_mark; diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 26e668e7a..8064ed696 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -894,57 +894,107 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception END_FUNCTION art_quick_alloc_object_rosalloc -// A handle-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). -DEFINE_FUNCTION art_quick_alloc_object_tlab - // Fast path tlab allocation. - // RDI: uint32_t type_idx, RSI: ArtMethod* - // RDX, RCX, R8, R9: free. RAX: return val. - // TODO: Add read barrier when this function is used. - // Note this function can/should implement read barrier fast path only - // (no read barrier slow path) because this is the fast path of tlab allocation. - // We can fall back to the allocation slow path to do the read barrier slow path. -#if defined(USE_READ_BARRIER) - int3 - int3 -#endif - // Might need a special macro since rsi and edx is 32b/64b mismatched. - movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array - // TODO: Add read barrier when this function is used. - // Might need to break down into multiple instructions to get the base address in a register. - // Load the class - movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx +// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. +// +// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value. +// RCX: scratch, r8: Thread::Current(). +MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel) testl %edx, %edx // Check null class - jz .Lart_quick_alloc_object_tlab_slow_path + jz RAW_VAR(slowPathLabel) // Check class status. 
cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx) - jne .Lart_quick_alloc_object_tlab_slow_path - // Check access flags has kAccClassIsFinalizable + jne RAW_VAR(slowPathLabel) + // No fake dependence needed on x86 + // between status and flags load, + // since each load is a load-acquire, + // no loads reordering. + // Check access flags has + // kAccClassIsFinalizable testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx) - jnz .Lart_quick_alloc_object_tlab_slow_path + jnz RAW_VAR(slowPathLabel) + movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread + movq THREAD_LOCAL_END_OFFSET(%r8), %rax // Load thread_local_end. + subq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Compute the remaining buffer size. movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %ecx // Load the object size. + cmpq %rax, %rcx // Check if it fits. OK to do this + // before rounding up the object size + // assuming the buf size alignment. + ja RAW_VAR(slowPathLabel) addl LITERAL(OBJECT_ALIGNMENT_MASK), %ecx // Align the size by 8. (addr + 7) & ~7. andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %ecx - movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread - movq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Load thread_local_pos. + movq THREAD_LOCAL_POS_OFFSET(%r8), %rax // Load thread_local_pos + // as allocated object. addq %rax, %rcx // Add the object size. - cmpq THREAD_LOCAL_END_OFFSET(%r8), %rcx // Check if it fits. - ja .Lart_quick_alloc_object_tlab_slow_path movq %rcx, THREAD_LOCAL_POS_OFFSET(%r8) // Update thread_local_pos. - addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increment thread_local_objects. + addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increase thread_local_objects. // Store the class pointer in the header. // No fence needed for x86. + POISON_HEAP_REF edx movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax) ret // Fast path succeeded. 
-.Lart_quick_alloc_object_tlab_slow_path: +END_MACRO + +// The common slow path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. +MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name) SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC // Outgoing argument set up movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() - call SYMBOL(artAllocObjectFromCodeTLAB) // cxx_name(arg0, arg1, Thread*) + call VAR(cxx_name) // cxx_name(arg0, arg1, Thread*) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception +END_MACRO + +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). +DEFINE_FUNCTION art_quick_alloc_object_tlab + // Fast path tlab allocation. + // RDI: uint32_t type_idx, RSI: ArtMethod* + // RDX, RCX, R8, R9: free. RAX: return val. +#if defined(USE_READ_BARRIER) + int3 + int3 +#endif + // Might need a special macro since rsi and edx is 32b/64b mismatched. + movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array + // Might need to break down into multiple instructions to get the base address in a register. + // Load the class + movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx + ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path +.Lart_quick_alloc_object_tlab_slow_path: + ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeTLAB END_FUNCTION art_quick_alloc_object_tlab -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB). +DEFINE_FUNCTION art_quick_alloc_object_region_tlab + // Fast path region tlab allocation. + // RDI: uint32_t type_idx, RSI: ArtMethod* + // RDX, RCX, R8, R9: free. RAX: return val. +#if !defined(USE_READ_BARRIER) + int3 + int3 +#endif + // Might need a special macro since rsi and edx is 32b/64b mismatched. 
+ movq ART_METHOD_DEX_CACHE_TYPES_OFFSET_64(%rsi), %rdx // Load dex cache resolved types array + // Might need to break down into multiple instructions to get the base address in a register. + // Load the class + movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx + cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET + jne .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path +.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit: + ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path +.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path: + // The read barrier slow path. Mark the class. + PUSH rdi + PUSH rsi + // Outgoing argument set up + movq %rdx, %rdi // Pass the class as the first param. + call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj) + movq %rax, %rdx + POP rsi + POP rdi + jmp .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit +.Lart_quick_alloc_object_region_tlab_slow_path: + ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeRegionTLAB +END_FUNCTION art_quick_alloc_object_region_tlab ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER @@ -994,6 +1044,14 @@ DEFINE_FUNCTION art_quick_lock_object RETURN_IF_EAX_ZERO END_FUNCTION art_quick_lock_object +DEFINE_FUNCTION art_quick_lock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME + movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current() + call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO +END_FUNCTION art_quick_lock_object_no_inline + DEFINE_FUNCTION art_quick_unlock_object testl %edi, %edi // null check object/edi jz .Lslow_unlock @@ -1037,6 +1095,14 @@ DEFINE_FUNCTION 
art_quick_unlock_object RETURN_IF_EAX_ZERO END_FUNCTION art_quick_unlock_object +DEFINE_FUNCTION art_quick_unlock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME + movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current() + call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO +END_FUNCTION art_quick_unlock_object_no_inline + DEFINE_FUNCTION art_quick_check_cast PUSH rdi // Save args for exc PUSH rsi diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h index 6449efad7..7647ad6e5 100644 --- a/runtime/art_method-inl.h +++ b/runtime/art_method-inl.h @@ -456,13 +456,18 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) { interface_method->VisitRoots(visitor, pointer_size); } visitor.VisitRoot(declaring_class_.AddressWithoutBarrier()); - // Runtime methods and native methods use the same field as the profiling info for - // storing their own data (jni entrypoint for native methods, and ImtConflictTable for - // some runtime methods). - if (!IsNative() && !IsRuntimeMethod()) { - ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size); - if (profiling_info != nullptr) { - profiling_info->VisitRoots(visitor); + // We know we don't have profiling information if the class hasn't been verified. Note + // that this check also ensures the IsNative call can be made, as IsNative expects a fully + // created class (and not a retired one). + if (klass->IsVerified()) { + // Runtime methods and native methods use the same field as the profiling info for + // storing their own data (jni entrypoint for native methods, and ImtConflictTable for + // some runtime methods). 
+ if (!IsNative() && !IsRuntimeMethod()) { + ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size); + if (profiling_info != nullptr) { + profiling_info->VisitRoots(visitor); + } } } } diff --git a/runtime/art_method.cc b/runtime/art_method.cc index 34d19d151..1790df6be 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -253,14 +253,17 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* Runtime* runtime = Runtime::Current(); // Call the invoke stub, passing everything as arguments. // If the runtime is not yet started or it is required by the debugger, then perform the - // Invocation by the interpreter. + // Invocation by the interpreter, explicitly forcing interpretation over JIT to prevent + // cycling around the various JIT/Interpreter methods that handle method invocation. if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) { if (IsStatic()) { - art::interpreter::EnterInterpreterFromInvoke(self, this, nullptr, args, result); + art::interpreter::EnterInterpreterFromInvoke( + self, this, nullptr, args, result, /*stay_in_interpreter*/ true); } else { mirror::Object* receiver = reinterpret_cast*>(&args[0])->AsMirrorPtr(); - art::interpreter::EnterInterpreterFromInvoke(self, this, receiver, args + 1, result); + art::interpreter::EnterInterpreterFromInvoke( + self, this, receiver, args + 1, result, /*stay_in_interpreter*/ true); } } else { DCHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), sizeof(void*)); @@ -276,7 +279,7 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* // Ensure that we won't be accidentally calling quick compiled code when -Xint. 
if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) { - CHECK(!runtime->UseJit()); + CHECK(!runtime->UseJitCompilation()); const void* oat_quick_code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(this); CHECK(oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode()) << "Don't call compiled code when -Xint " << PrettyMethod(this); @@ -481,7 +484,7 @@ void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) { // to the JIT code, but this would require taking the JIT code cache lock to notify // it, which we do not want at this level. Runtime* runtime = Runtime::Current(); - if (runtime->GetJit() != nullptr) { + if (runtime->UseJitCompilation()) { if (runtime->GetJit()->GetCodeCache()->ContainsPc(GetEntryPointFromQuickCompiledCode())) { SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size); } diff --git a/runtime/art_method.h b/runtime/art_method.h index 08f02852e..a012a5a9c 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -41,6 +41,7 @@ class ShadowFrame; namespace mirror { class Array; class Class; +class IfTable; class PointerArray; } // namespace mirror @@ -50,66 +51,151 @@ class PointerArray; // with the last entry being null to make an assembly implementation of a lookup // faster. class ImtConflictTable { + enum MethodIndex { + kMethodInterface, + kMethodImplementation, + kMethodCount, // Number of elements in enum. 
+ }; + public: // Build a new table copying `other` and adding the new entry formed of // the pair { `interface_method`, `implementation_method` } ImtConflictTable(ImtConflictTable* other, ArtMethod* interface_method, - ArtMethod* implementation_method) { - size_t index = 0; - while (other->entries_[index].interface_method != nullptr) { - entries_[index] = other->entries_[index]; - index++; + ArtMethod* implementation_method, + size_t pointer_size) { + const size_t count = other->NumEntries(pointer_size); + for (size_t i = 0; i < count; ++i) { + SetInterfaceMethod(i, pointer_size, other->GetInterfaceMethod(i, pointer_size)); + SetImplementationMethod(i, pointer_size, other->GetImplementationMethod(i, pointer_size)); } - entries_[index].interface_method = interface_method; - entries_[index].implementation_method = implementation_method; + SetInterfaceMethod(count, pointer_size, interface_method); + SetImplementationMethod(count, pointer_size, implementation_method); // Add the null marker. - entries_[index + 1].interface_method = nullptr; - entries_[index + 1].implementation_method = nullptr; + SetInterfaceMethod(count + 1, pointer_size, nullptr); + SetImplementationMethod(count + 1, pointer_size, nullptr); + } + + // num_entries excludes the header. + ImtConflictTable(size_t num_entries, size_t pointer_size) { + SetInterfaceMethod(num_entries, pointer_size, nullptr); + SetImplementationMethod(num_entries, pointer_size, nullptr); + } + + // Set an entry at an index. 
+ void SetInterfaceMethod(size_t index, size_t pointer_size, ArtMethod* method) { + SetMethod(index * kMethodCount + kMethodInterface, pointer_size, method); + } + + void SetImplementationMethod(size_t index, size_t pointer_size, ArtMethod* method) { + SetMethod(index * kMethodCount + kMethodImplementation, pointer_size, method); + } + + ArtMethod* GetInterfaceMethod(size_t index, size_t pointer_size) const { + return GetMethod(index * kMethodCount + kMethodInterface, pointer_size); + } + + ArtMethod* GetImplementationMethod(size_t index, size_t pointer_size) const { + return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size); + } + + // Visit all of the entries. + // NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod* + // and also returns one. The order is . + template + void Visit(const Visitor& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS { + uint32_t table_index = 0; + for (;;) { + ArtMethod* interface_method = GetInterfaceMethod(table_index, pointer_size); + if (interface_method == nullptr) { + break; + } + ArtMethod* implementation_method = GetImplementationMethod(table_index, pointer_size); + auto input = std::make_pair(interface_method, implementation_method); + std::pair updated = visitor(input); + if (input.first != updated.first) { + SetInterfaceMethod(table_index, pointer_size, updated.first); + } + if (input.second != updated.second) { + SetImplementationMethod(table_index, pointer_size, updated.second); + } + ++table_index; + } } // Lookup the implementation ArtMethod associated to `interface_method`. Return null // if not found. 
- ArtMethod* Lookup(ArtMethod* interface_method) const { + ArtMethod* Lookup(ArtMethod* interface_method, size_t pointer_size) const { uint32_t table_index = 0; - ArtMethod* current_interface_method; - while ((current_interface_method = entries_[table_index].interface_method) != nullptr) { + for (;;) { + ArtMethod* current_interface_method = GetInterfaceMethod(table_index, pointer_size); + if (current_interface_method == nullptr) { + break; + } if (current_interface_method == interface_method) { - return entries_[table_index].implementation_method; + return GetImplementationMethod(table_index, pointer_size); } - table_index++; + ++table_index; } return nullptr; } - // Compute the size in bytes taken by this table. - size_t ComputeSize() const { + // Compute the number of entries in this table. + size_t NumEntries(size_t pointer_size) const { uint32_t table_index = 0; - size_t total_size = 0; - while ((entries_[table_index].interface_method) != nullptr) { - total_size += sizeof(Entry); - table_index++; + while (GetInterfaceMethod(table_index, pointer_size) != nullptr) { + ++table_index; } + return table_index; + } + + // Compute the size in bytes taken by this table. + size_t ComputeSize(size_t pointer_size) const { // Add the end marker. - return total_size + sizeof(Entry); + return ComputeSize(NumEntries(pointer_size), pointer_size); } // Compute the size in bytes needed for copying the given `table` and add // one more entry. - static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table) { - return table->ComputeSize() + sizeof(Entry); + static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, size_t pointer_size) { + return table->ComputeSize(pointer_size) + EntrySize(pointer_size); } - struct Entry { - ArtMethod* interface_method; - ArtMethod* implementation_method; - }; + // Compute size with a fixed number of entries. 
+ static size_t ComputeSize(size_t num_entries, size_t pointer_size) { + return (num_entries + 1) * EntrySize(pointer_size); // Add one for null terminator. + } + + static size_t EntrySize(size_t pointer_size) { + return pointer_size * static_cast(kMethodCount); + } private: + ArtMethod* GetMethod(size_t index, size_t pointer_size) const { + if (pointer_size == 8) { + return reinterpret_cast(static_cast(data64_[index])); + } else { + DCHECK_EQ(pointer_size, 4u); + return reinterpret_cast(static_cast(data32_[index])); + } + } + + void SetMethod(size_t index, size_t pointer_size, ArtMethod* method) { + if (pointer_size == 8) { + data64_[index] = dchecked_integral_cast(reinterpret_cast(method)); + } else { + DCHECK_EQ(pointer_size, 4u); + data32_[index] = dchecked_integral_cast(reinterpret_cast(method)); + } + } + // Array of entries that the assembly stubs will iterate over. Note that this is // not fixed size, and we allocate data prior to calling the constructor // of ImtConflictTable. - Entry entries_[0]; + union { + uint32_t data32_[0]; + uint64_t data64_[0]; + }; DISALLOW_COPY_AND_ASSIGN(ImtConflictTable); }; @@ -265,6 +351,12 @@ class ArtMethod FINAL { SetAccessFlags(GetAccessFlags() | kAccSkipAccessChecks); } + // Should this method be run in the interpreter and count locks (e.g., failed structured- + // locking verification)? + bool MustCountLocks() { + return (GetAccessFlags() & kAccMustCountLocks) != 0; + } + // Returns true if this method could be overridden by a default method. bool IsOverridableByDefaultMethod() SHARED_REQUIRES(Locks::mutator_lock_); @@ -351,7 +443,6 @@ class ArtMethod FINAL { // Find the method that this method overrides. ArtMethod* FindOverriddenMethod(size_t pointer_size) - REQUIRES(Roles::uninterruptible_) SHARED_REQUIRES(Locks::mutator_lock_); // Find the method index for this method within other_dexfile. 
If this method isn't present then @@ -417,8 +508,8 @@ class ArtMethod FINAL { return reinterpret_cast(GetEntryPointFromJniPtrSize(pointer_size)); } - ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table) { - SetEntryPointFromJniPtrSize(table, sizeof(void*)); + ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, size_t pointer_size) { + SetEntryPointFromJniPtrSize(table, pointer_size); } ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) { diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h index bcb7b3b76..0e3bc8e1b 100644 --- a/runtime/base/histogram.h +++ b/runtime/base/histogram.h @@ -85,6 +85,10 @@ template class Histogram { return max_value_added_; } + Value BucketWidth() const { + return bucket_width_; + } + const std::string& Name() const { return name_; } diff --git a/runtime/base/logging.h b/runtime/base/logging.h index 97280c3a0..3b5b8b54a 100644 --- a/runtime/base/logging.h +++ b/runtime/base/logging.h @@ -56,6 +56,7 @@ struct LogVerbosity { bool threads; bool verifier; bool image; + bool systrace_lock_logging; // Enabled with "-verbose:sys-locks". }; // Global log verbosity setting, initialized by InitLogging. diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc index beabce36f..639f913e8 100644 --- a/runtime/check_jni.cc +++ b/runtime/check_jni.cc @@ -1176,14 +1176,16 @@ class ScopedCheck { return false; } - // Get the *correct* JNIEnv by going through our TLS pointer. + // Get the current thread's JNIEnv by going through our TLS pointer. JNIEnvExt* threadEnv = self->GetJniEnv(); // Verify that the current thread is (a) attached and (b) associated with // this particular instance of JNIEnv. if (env != threadEnv) { + // Get the thread owning the JNIEnv that's being used. 
+ Thread* envThread = reinterpret_cast(env)->self; AbortF("thread %s using JNIEnv* from thread %s", - ToStr(*self).c_str(), ToStr(*self).c_str()); + ToStr(*self).c_str(), ToStr(*envThread).c_str()); return false; } diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index fa0107af8..8fcb6b25f 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -687,6 +687,9 @@ bool ClassLinker::InitWithoutImage(std::vector> b self->AssertNoPendingException(); } + // Create conflict tables that depend on the class linker. + runtime->FixupConflictTables(); + FinishInit(self); VLOG(startup) << "ClassLinker::InitFromCompiler exiting"; @@ -773,9 +776,13 @@ static void SanityCheckArtMethod(ArtMethod* m, bool contains = false; for (gc::space::ImageSpace* space : spaces) { auto& header = space->GetImageHeader(); - auto& methods = header.GetMethodsSection(); - auto offset = reinterpret_cast(m) - space->Begin(); - contains |= methods.Contains(offset); + size_t offset = reinterpret_cast(m) - space->Begin(); + + const ImageSection& methods = header.GetMethodsSection(); + contains = contains || methods.Contains(offset); + + const ImageSection& runtime_methods = header.GetRuntimeMethodsSection(); + contains = contains || runtime_methods.Contains(offset); } CHECK(contains) << m << " not found"; } @@ -1056,9 +1063,8 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) { return true; } -static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa, - mirror::ClassLoader* class_loader) - SHARED_REQUIRES(Locks::mutator_lock_) { +bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa, + mirror::ClassLoader* class_loader) { return class_loader == nullptr || class_loader->GetClass() == soa.Decode(WellKnownClasses::java_lang_BootClassLoader); @@ -1099,7 +1105,7 @@ static bool FlattenPathClassLoader(mirror::ClassLoader* class_loader, soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements); CHECK(dex_path_list_field != 
nullptr); CHECK(dex_elements_field != nullptr); - while (!IsBootClassLoader(soa, class_loader)) { + while (!ClassLinker::IsBootClassLoader(soa, class_loader)) { if (class_loader->GetClass() != soa.Decode(WellKnownClasses::dalvik_system_PathClassLoader)) { *error_msg = StringPrintf("Unknown class loader type %s", PrettyTypeOf(class_loader).c_str()); @@ -1438,20 +1444,14 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( if (*out_forward_dex_cache_array) { ScopedTrace timing("Fixup ArtMethod dex cache arrays"); FixupArtMethodArrayVisitor visitor(header); - header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &visitor, - space->Begin(), - sizeof(void*)); + header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*)); Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get()); } if (kVerifyArtMethodDeclaringClasses) { ScopedTrace timing("Verify declaring classes"); ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_); VerifyDeclaringClassVisitor visitor; - header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &visitor, - space->Begin(), - sizeof(void*)); + header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*)); } return true; } @@ -1729,9 +1729,8 @@ bool ClassLinker::AddImageSpace( // Set entry point to interpreter if in InterpretOnly mode. if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) { - const ImageSection& methods = header.GetMethodsSection(); SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_); - methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_); + header.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_); } ClassTable* class_table = nullptr; @@ -1800,10 +1799,7 @@ bool ClassLinker::AddImageSpace( // This verification needs to happen after the classes have been added to the class loader. // Since it ensures classes are in the class table. 
VerifyClassInTableArtMethodVisitor visitor2(class_table); - header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &visitor2, - space->Begin(), - sizeof(void*)); + header.VisitPackedArtMethods(&visitor2, space->Begin(), sizeof(void*)); } VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time); return true; @@ -2035,6 +2031,7 @@ void ClassLinker::DeleteClassLoader(Thread* self, const ClassLoaderData& data) { Runtime* const runtime = Runtime::Current(); JavaVMExt* const vm = runtime->GetJavaVM(); vm->DeleteWeakGlobalRef(self, data.weak_root); + // Notify the JIT that we need to remove the methods and/or profiling info. if (runtime->GetJit() != nullptr) { jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache(); if (code_cache != nullptr) { @@ -2752,7 +2749,7 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* } if (runtime->IsNativeDebuggable()) { - DCHECK(runtime->UseJit() && runtime->GetJit()->JitAtFirstUse()); + DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse()); // If we are doing native debugging, ignore application's AOT code, // since we want to JIT it with extra stackmaps for native debugging. // On the other hand, keep all AOT code from the boot image, since the @@ -5319,6 +5316,19 @@ bool ClassLinker::LoadSuperAndInterfaces(Handle klass, const DexF const DexFile::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex()); uint16_t super_class_idx = class_def.superclass_idx_; if (super_class_idx != DexFile::kDexNoIndex16) { + // Check that a class does not inherit from itself directly. + // + // TODO: This is a cheap check to detect the straightforward case + // of a class extending itself (b/28685551), but we should do a + // proper cycle detection on loaded classes, to detect all cases + // of class circularity errors (b/28830038). 
+ if (super_class_idx == class_def.class_idx_) { + ThrowClassCircularityError(klass.Get(), + "Class %s extends itself", + PrettyDescriptor(klass.Get()).c_str()); + return false; + } + mirror::Class* super_class = ResolveType(dex_file, super_class_idx, klass.Get()); if (super_class == nullptr) { DCHECK(Thread::Current()->IsExceptionPending()); @@ -5961,16 +5971,49 @@ ClassLinker::DefaultMethodSearchResult ClassLinker::FindDefaultMethodImplementat } } -// Sets imt_ref appropriately for LinkInterfaceMethods. -// If there is no method in the imt location of imt_ref it will store the given method there. -// Otherwise it will set the conflict method which will figure out which method to use during -// runtime. -static void SetIMTRef(ArtMethod* unimplemented_method, - ArtMethod* imt_conflict_method, - size_t image_pointer_size, - ArtMethod* current_method, - /*out*/ArtMethod** imt_ref) - SHARED_REQUIRES(Locks::mutator_lock_) { +ArtMethod* ClassLinker::AddMethodToConflictTable(mirror::Class* klass, + ArtMethod* conflict_method, + ArtMethod* interface_method, + ArtMethod* method, + bool force_new_conflict_method) { + ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*)); + Runtime* const runtime = Runtime::Current(); + LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader()); + bool new_entry = conflict_method == runtime->GetImtConflictMethod() || force_new_conflict_method; + + // Create a new entry if the existing one is the shared conflict method. + ArtMethod* new_conflict_method = new_entry + ? runtime->CreateImtConflictMethod(linear_alloc) + : conflict_method; + + // Allocate a new table. Note that we will leak this table at the next conflict, + // but that's a tradeoff compared to making the table fixed size. 
+ void* data = linear_alloc->Alloc( + Thread::Current(), ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table, + image_pointer_size_)); + if (data == nullptr) { + LOG(ERROR) << "Failed to allocate conflict table"; + return conflict_method; + } + ImtConflictTable* new_table = new (data) ImtConflictTable(current_table, + interface_method, + method, + image_pointer_size_); + + // Do a fence to ensure threads see the data in the table before it is assigned + // to the conflict method. + // Note that there is a race in the presence of multiple threads and we may leak + // memory from the LinearAlloc, but that's a tradeoff compared to using + // atomic operations. + QuasiAtomic::ThreadFenceRelease(); + new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_); + return new_conflict_method; +} + +void ClassLinker::SetIMTRef(ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + ArtMethod* current_method, + /*out*/ArtMethod** imt_ref) { // Place method in imt if entry is empty, place conflict otherwise. if (*imt_ref == unimplemented_method) { *imt_ref = current_method; @@ -5980,9 +6023,9 @@ static void SetIMTRef(ArtMethod* unimplemented_method, // Note that we have checked IsRuntimeMethod, as there may be multiple different // conflict methods. 
MethodNameAndSignatureComparator imt_comparator( - (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size)); + (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size_)); if (imt_comparator.HasSameNameAndSignature( - current_method->GetInterfaceMethodIfProxy(image_pointer_size))) { + current_method->GetInterfaceMethodIfProxy(image_pointer_size_))) { *imt_ref = current_method; } else { *imt_ref = imt_conflict_method; @@ -5995,6 +6038,151 @@ static void SetIMTRef(ArtMethod* unimplemented_method, } } +void ClassLinker::FillIMTAndConflictTables(mirror::Class* klass) { + DCHECK(klass->ShouldHaveEmbeddedImtAndVTable()) << PrettyClass(klass); + DCHECK(!klass->IsTemp()) << PrettyClass(klass); + ArtMethod* imt[mirror::Class::kImtSize]; + Runtime* const runtime = Runtime::Current(); + ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod(); + ArtMethod* const conflict_method = runtime->GetImtConflictMethod(); + std::fill_n(imt, arraysize(imt), unimplemented_method); + if (klass->GetIfTable() != nullptr) { + FillIMTFromIfTable(klass->GetIfTable(), + unimplemented_method, + conflict_method, + klass, + true, + false, + &imt[0]); + } + for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { + klass->SetEmbeddedImTableEntry(i, imt[i], image_pointer_size_); + } +} + +static inline uint32_t GetIMTIndex(ArtMethod* interface_method) + SHARED_REQUIRES(Locks::mutator_lock_) { + return interface_method->GetDexMethodIndex() % mirror::Class::kImtSize; +} + +ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count, + LinearAlloc* linear_alloc, + size_t image_pointer_size) { + void* data = linear_alloc->Alloc(Thread::Current(), + ImtConflictTable::ComputeSize(count, + image_pointer_size)); + return (data != nullptr) ? 
new (data) ImtConflictTable(count, image_pointer_size) : nullptr; +} + +ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc) { + return CreateImtConflictTable(count, linear_alloc, image_pointer_size_); +} + +void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table, + ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + mirror::Class* klass, + bool create_conflict_tables, + bool ignore_copied_methods, + ArtMethod** imt) { + uint32_t conflict_counts[mirror::Class::kImtSize] = {}; + for (size_t i = 0, length = if_table->Count(); i < length; ++i) { + mirror::Class* interface = if_table->GetInterface(i); + const size_t num_virtuals = interface->NumVirtualMethods(); + const size_t method_array_count = if_table->GetMethodArrayCount(i); + // Virtual methods can be larger than the if table methods if there are default methods. + DCHECK_GE(num_virtuals, method_array_count); + if (kIsDebugBuild) { + if (klass->IsInterface()) { + DCHECK_EQ(method_array_count, 0u); + } else { + DCHECK_EQ(interface->NumDeclaredVirtualMethods(), method_array_count); + } + } + if (method_array_count == 0) { + continue; + } + auto* method_array = if_table->GetMethodArray(i); + for (size_t j = 0; j < method_array_count; ++j) { + ArtMethod* implementation_method = + method_array->GetElementPtrSize(j, image_pointer_size_); + if (ignore_copied_methods && implementation_method->IsCopied()) { + continue; + } + DCHECK(implementation_method != nullptr); + // Miranda methods cannot be used to implement an interface method, but they are safe to put + // in the IMT since their entrypoint is the interface trampoline. If we put any copied methods + // or interface methods in the IMT here they will not create extra conflicts since we compare + // names and signatures in SetIMTRef. 
+ ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_); + const uint32_t imt_index = GetIMTIndex(interface_method); + + // There is only any conflicts if all of the interface methods for an IMT slot don't have + // the same implementation method, keep track of this to avoid creating a conflict table in + // this case. + + // Conflict table size for each IMT slot. + ++conflict_counts[imt_index]; + + SetIMTRef(unimplemented_method, + imt_conflict_method, + implementation_method, + /*out*/&imt[imt_index]); + } + } + + if (create_conflict_tables) { + // Create the conflict tables. + LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader()); + for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { + size_t conflicts = conflict_counts[i]; + if (imt[i] == imt_conflict_method) { + ImtConflictTable* new_table = CreateImtConflictTable(conflicts, linear_alloc); + if (new_table != nullptr) { + ArtMethod* new_conflict_method = + Runtime::Current()->CreateImtConflictMethod(linear_alloc); + new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_); + imt[i] = new_conflict_method; + } else { + LOG(ERROR) << "Failed to allocate conflict table"; + imt[i] = imt_conflict_method; + } + } else { + DCHECK_NE(imt[i], imt_conflict_method); + } + } + + for (size_t i = 0, length = if_table->Count(); i < length; ++i) { + mirror::Class* interface = if_table->GetInterface(i); + const size_t method_array_count = if_table->GetMethodArrayCount(i); + // Virtual methods can be larger than the if table methods if there are default methods. 
+ if (method_array_count == 0) { + continue; + } + auto* method_array = if_table->GetMethodArray(i); + for (size_t j = 0; j < method_array_count; ++j) { + ArtMethod* implementation_method = + method_array->GetElementPtrSize(j, image_pointer_size_); + if (ignore_copied_methods && implementation_method->IsCopied()) { + continue; + } + DCHECK(implementation_method != nullptr); + ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_); + const uint32_t imt_index = GetIMTIndex(interface_method); + if (!imt[imt_index]->IsRuntimeMethod() || + imt[imt_index] == unimplemented_method || + imt[imt_index] == imt_conflict_method) { + continue; + } + ImtConflictTable* table = imt[imt_index]->GetImtConflictTable(image_pointer_size_); + const size_t num_entries = table->NumEntries(image_pointer_size_); + table->SetInterfaceMethod(num_entries, image_pointer_size_, interface_method); + table->SetImplementationMethod(num_entries, image_pointer_size_, implementation_method); + } + } + } +} + // Simple helper function that checks that no subtypes of 'val' are contained within the 'classes' // set. 
static bool NotSubinterfaceOfAny(const std::unordered_set& classes, @@ -6230,48 +6418,28 @@ static void SanityCheckVTable(Handle klass, uint32_t pointer_size } } -static void FillImtFromSuperClass(Handle klass, - Handle iftable, - ArtMethod* unimplemented_method, - ArtMethod* imt_conflict_method, - ArtMethod** out_imt, - size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) { +void ClassLinker::FillImtFromSuperClass(Handle klass, + ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + ArtMethod** imt) { DCHECK(klass->HasSuperClass()); mirror::Class* super_class = klass->GetSuperClass(); if (super_class->ShouldHaveEmbeddedImtAndVTable()) { for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { - out_imt[i] = super_class->GetEmbeddedImTableEntry(i, pointer_size); + imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_); } } else { // No imt in the super class, need to reconstruct from the iftable. mirror::IfTable* if_table = super_class->GetIfTable(); - const size_t length = super_class->GetIfTableCount(); - for (size_t i = 0; i < length; ++i) { - mirror::Class* interface = iftable->GetInterface(i); - const size_t num_virtuals = interface->NumDeclaredVirtualMethods(); - const size_t method_array_count = if_table->GetMethodArrayCount(i); - DCHECK_EQ(num_virtuals, method_array_count); - if (method_array_count == 0) { - continue; - } - auto* method_array = if_table->GetMethodArray(i); - for (size_t j = 0; j < num_virtuals; ++j) { - auto method = method_array->GetElementPtrSize(j, pointer_size); - DCHECK(method != nullptr) << PrettyClass(super_class); - // Miranda methods cannot be used to implement an interface method and defaults should be - // skipped in case we override it. 
- if (method->IsDefault() || method->IsMiranda()) { - continue; - } - ArtMethod* interface_method = interface->GetVirtualMethod(j, pointer_size); - uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize; - auto** imt_ref = &out_imt[imt_index]; - if (*imt_ref == unimplemented_method) { - *imt_ref = method; - } else if (*imt_ref != imt_conflict_method) { - *imt_ref = imt_conflict_method; - } - } + if (if_table != nullptr) { + // Ignore copied methods since we will handle these in LinkInterfaceMethods. + FillIMTFromIfTable(if_table, + unimplemented_method, + imt_conflict_method, + klass.Get(), + /*create_conflict_table*/false, + /*ignore_copied_methods*/true, + /*out*/imt); } } } @@ -6314,13 +6482,10 @@ bool ClassLinker::LinkInterfaceMethods( const bool extend_super_iftable = has_superclass; if (has_superclass && fill_tables) { FillImtFromSuperClass(klass, - iftable, unimplemented_method, imt_conflict_method, - out_imt, - image_pointer_size_); + out_imt); } - // Allocate method arrays before since we don't want miss visiting miranda method roots due to // thread suspension. if (fill_tables) { @@ -6404,7 +6569,7 @@ bool ClassLinker::LinkInterfaceMethods( auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_); MethodNameAndSignatureComparator interface_name_comparator( interface_method->GetInterfaceMethodIfProxy(image_pointer_size_)); - uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize; + uint32_t imt_index = GetIMTIndex(interface_method); ArtMethod** imt_ptr = &out_imt[imt_index]; // For each method listed in the interface's method list, find the // matching method in our class's method list. We want to favor the @@ -6449,7 +6614,6 @@ bool ClassLinker::LinkInterfaceMethods( // Place method in imt if entry is empty, place conflict otherwise. 
SetIMTRef(unimplemented_method, imt_conflict_method, - image_pointer_size_, vtable_method, /*out*/imt_ptr); } @@ -6483,6 +6647,17 @@ bool ClassLinker::LinkInterfaceMethods( // The method is not overridable by a default method (i.e. it is directly implemented // in some class). Therefore move onto the next interface method. continue; + } else { + // If the super-classes method is override-able by a default method we need to keep + // track of it since though it is override-able it is not guaranteed to be 'overridden'. + // If it turns out not to be overridden and we did not keep track of it we might add it + // to the vtable twice, causing corruption (vtable entries having inconsistent and + // illegal states, incorrect vtable size, and incorrect or inconsistent iftable entries) + // in this class and any subclasses. + DCHECK(vtable_impl == nullptr || vtable_impl == supers_method) + << "vtable_impl was " << PrettyMethod(vtable_impl) << " and not 'nullptr' or " + << PrettyMethod(supers_method) << " as expected. IFTable appears to be corrupt!"; + vtable_impl = supers_method; } } // If we haven't found it yet we should search through the interfaces for default methods. @@ -6581,7 +6756,6 @@ bool ClassLinker::LinkInterfaceMethods( method_array->SetElementPtrSize(j, current_method, image_pointer_size_); SetIMTRef(unimplemented_method, imt_conflict_method, - image_pointer_size_, current_method, /*out*/imt_ptr); } @@ -6808,6 +6982,7 @@ bool ClassLinker::LinkInterfaceMethods( } // Put some random garbage in old methods to help find stale pointers. 
if (methods != old_methods && old_methods != nullptr) { + WriterMutexLock mu(self, ClassTableForClassLoader(klass->GetClassLoader())->GetLock()); memset(old_methods, 0xFEu, old_size); } } else { @@ -7652,7 +7827,8 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) { return descriptor; } -jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector& dex_files) { +jobject ClassLinker::CreatePathClassLoader(Thread* self, + const std::vector& dex_files) { // SOAAlreadyRunnable is protected, and we need something to add a global reference. // We could move the jobject to the callers, but all call-sites do this... ScopedObjectAccessUnchecked soa(self); @@ -7835,6 +8011,7 @@ std::set ClassLinker::GetResolvedClasses(bool ignore_bo VLOG(class_linker) << "Collecting class profile for dex file " << location << " types=" << num_types << " class_defs=" << num_class_defs; DexCacheResolvedClasses resolved_classes(dex_file->GetLocation(), + dex_file->GetBaseLocation(), dex_file->GetLocationChecksum()); size_t num_resolved = 0; std::unordered_set class_set; diff --git a/runtime/class_linker.h b/runtime/class_linker.h index 97a10367f..f6ce545a1 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -53,6 +53,7 @@ namespace mirror { class StackTraceElement; } // namespace mirror +class ImtConflictTable; template class Handle; template class MutableHandle; class InternTable; @@ -254,7 +255,7 @@ class ClassLinker { SHARED_REQUIRES(Locks::mutator_lock_); // Resolve a Type with the given index from the DexFile, storing the - // result in the DexCache. The referrer is used to identity the + // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. 
mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, mirror::Class* referrer) SHARED_REQUIRES(Locks::mutator_lock_) @@ -559,7 +560,7 @@ class ClassLinker { // Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files. // Note: the objects are not completely set up. Do not use this outside of tests and the compiler. - jobject CreatePathClassLoader(Thread* self, std::vector& dex_files) + jobject CreatePathClassLoader(Thread* self, const std::vector& dex_files) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_); @@ -610,6 +611,30 @@ class ClassLinker { const std::set& classes) REQUIRES(!dex_lock_); + static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa, + mirror::ClassLoader* class_loader) + SHARED_REQUIRES(Locks::mutator_lock_); + + ArtMethod* AddMethodToConflictTable(mirror::Class* klass, + ArtMethod* conflict_method, + ArtMethod* interface_method, + ArtMethod* method, + bool force_new_conflict_method) + SHARED_REQUIRES(Locks::mutator_lock_); + + // Create a conflict table with a specified capacity. + ImtConflictTable* CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc); + + // Static version for when the class linker is not yet created. + static ImtConflictTable* CreateImtConflictTable(size_t count, + LinearAlloc* linear_alloc, + size_t pointer_size); + + + // Create the IMT and conflict tables for a class. + void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); + + struct DexCacheData { // Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may // not work properly. @@ -999,7 +1024,7 @@ class ClassLinker { // Returns null if not found. ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader) - SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_); + SHARED_REQUIRES(Locks::mutator_lock_); // Insert a new class table if not found. 
ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) @@ -1057,6 +1082,28 @@ class ClassLinker { REQUIRES(!dex_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + // Sets imt_ref appropriately for LinkInterfaceMethods. + // If there is no method in the imt location of imt_ref it will store the given method there. + // Otherwise it will set the conflict method which will figure out which method to use during + // runtime. + void SetIMTRef(ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + ArtMethod* current_method, + /*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_); + + void FillIMTFromIfTable(mirror::IfTable* if_table, + ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + mirror::Class* klass, + bool create_conflict_tables, + bool ignore_copied_methods, + ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_); + + void FillImtFromSuperClass(Handle klass, + ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_); + std::vector boot_class_path_; std::vector> boot_dex_files_; diff --git a/runtime/class_table.h b/runtime/class_table.h index eb784b5c7..686381d35 100644 --- a/runtime/class_table.h +++ b/runtime/class_table.h @@ -153,6 +153,10 @@ class ClassTable { REQUIRES(!lock_) SHARED_REQUIRES(Locks::mutator_lock_); + ReaderWriterMutex& GetLock() { + return lock_; + } + private: // Lock to guard inserting and removing. mutable ReaderWriterMutex lock_; diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index b4208fe05..75cce424e 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -137,13 +137,21 @@ void ThrowClassCircularityError(mirror::Class* c) { ThrowException("Ljava/lang/ClassCircularityError;", c, msg.str().c_str()); } +void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...) 
{ + va_list args; + va_start(args, fmt); + ThrowException("Ljava/lang/ClassCircularityError;", c, fmt, &args); + va_end(args); +} + // ClassFormatError void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...) { va_list args; va_start(args, fmt); ThrowException("Ljava/lang/ClassFormatError;", referrer, fmt, &args); - va_end(args);} + va_end(args); +} // IllegalAccessError diff --git a/runtime/common_throws.h b/runtime/common_throws.h index 39c4e52b1..c3a1f09db 100644 --- a/runtime/common_throws.h +++ b/runtime/common_throws.h @@ -58,6 +58,9 @@ void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array void ThrowClassCircularityError(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; +void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...) + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; + // ClassCastException void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h index 6289d8a22..e8d74dd9e 100644 --- a/runtime/compiler_filter.h +++ b/runtime/compiler_filter.h @@ -44,6 +44,8 @@ class CompilerFilter FINAL { kEverything, // Compile everything capable of being compiled. }; + static const Filter kDefaultCompilerFilter = kSpeed; + // Returns true if an oat file with this compiler filter contains // compiled executable code. 
static bool IsCompilationEnabled(Filter filter); diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 55f68d3f2..80056423a 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -582,7 +582,7 @@ class UpdateEntryPointsClassVisitor : public ClassVisitor { if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) && !m.IsNative() && !m.IsProxyMethod()) { - instrumentation_->UpdateMethodsCode(&m, GetQuickToInterpreterBridge()); + instrumentation_->UpdateMethodsCodeFromDebugger(&m, GetQuickToInterpreterBridge()); } } return true; diff --git a/runtime/dex_cache_resolved_classes.h b/runtime/dex_cache_resolved_classes.h index 80c12cb64..0febbedf0 100644 --- a/runtime/dex_cache_resolved_classes.h +++ b/runtime/dex_cache_resolved_classes.h @@ -26,8 +26,11 @@ namespace art { // Data structure for passing around which classes belonging to a dex cache / dex file are resolved. class DexCacheResolvedClasses { public: - DexCacheResolvedClasses(const std::string& dex_location, uint32_t location_checksum) + DexCacheResolvedClasses(const std::string& dex_location, + const std::string& base_location, + uint32_t location_checksum) : dex_location_(dex_location), + base_location_(base_location), location_checksum_(location_checksum) {} // Only compare the key elements, ignore the resolved classes. @@ -35,6 +38,7 @@ class DexCacheResolvedClasses { if (location_checksum_ != other.location_checksum_) { return static_cast(location_checksum_ - other.location_checksum_); } + // Don't need to compare base_location_ since dex_location_ has more info. 
return dex_location_.compare(other.dex_location_); } @@ -47,6 +51,10 @@ class DexCacheResolvedClasses { return dex_location_; } + const std::string& GetBaseLocation() const { + return base_location_; + } + uint32_t GetLocationChecksum() const { return location_checksum_; } @@ -57,6 +65,7 @@ class DexCacheResolvedClasses { private: const std::string dex_location_; + const std::string base_location_; const uint32_t location_checksum_; // Array of resolved class def indexes. mutable std::unordered_set classes_; diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc index 63f3f08ba..af12abfc0 100644 --- a/runtime/dex_file.cc +++ b/runtime/dex_file.cc @@ -518,7 +518,7 @@ const DexFile::ClassDef* DexFile::FindClassDef(const char* descriptor, size_t ha return (class_def_idx != DexFile::kDexNoIndex) ? &GetClassDef(class_def_idx) : nullptr; } - // Fast path for rate no class defs case. + // Fast path for rare no class defs case. const uint32_t num_class_defs = NumClassDefs(); if (num_class_defs == 0) { return nullptr; @@ -548,8 +548,8 @@ const DexFile::ClassDef* DexFile::FindClassDef(uint16_t type_idx) const { } const DexFile::FieldId* DexFile::FindFieldId(const DexFile::TypeId& declaring_klass, - const DexFile::StringId& name, - const DexFile::TypeId& type) const { + const DexFile::StringId& name, + const DexFile::TypeId& type) const { // Binary search MethodIds knowing that they are sorted by class_idx, name_idx then proto_idx const uint16_t class_idx = GetIndexForTypeId(declaring_klass); const uint32_t name_idx = GetIndexForStringId(name); diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc index 3df4e98c8..bbffbbb7b 100644 --- a/runtime/dex_file_verifier.cc +++ b/runtime/dex_file_verifier.cc @@ -251,6 +251,14 @@ bool DexFileVerifier::CheckValidOffsetAndSize(uint32_t offset, return true; } +bool DexFileVerifier::CheckSizeLimit(uint32_t size, uint32_t limit, const char* label) { + if (size > limit) { + ErrorStringPrintf("Size(%u) should not exceed 
limit(%u) for %s.", size, limit, label); + return false; + } + return true; +} + bool DexFileVerifier::CheckHeader() { // Check file size from the header. uint32_t expected_size = header_->file_size_; @@ -298,10 +306,12 @@ bool DexFileVerifier::CheckHeader() { header_->type_ids_size_, 4, "type-ids") && + CheckSizeLimit(header_->type_ids_size_, DexFile::kDexNoIndex16, "type-ids") && CheckValidOffsetAndSize(header_->proto_ids_off_, header_->proto_ids_size_, 4, "proto-ids") && + CheckSizeLimit(header_->proto_ids_size_, DexFile::kDexNoIndex16, "proto-ids") && CheckValidOffsetAndSize(header_->field_ids_off_, header_->field_ids_size_, 4, @@ -1786,13 +1796,8 @@ bool DexFileVerifier::CheckInterProtoIdItem() { while (curr_it.HasNext() && prev_it.HasNext()) { uint16_t prev_idx = prev_it.GetTypeIdx(); uint16_t curr_idx = curr_it.GetTypeIdx(); - if (prev_idx == DexFile::kDexNoIndex16) { - break; - } - if (UNLIKELY(curr_idx == DexFile::kDexNoIndex16)) { - ErrorStringPrintf("Out-of-order proto_id arguments"); - return false; - } + DCHECK_NE(prev_idx, DexFile::kDexNoIndex16); + DCHECK_NE(curr_idx, DexFile::kDexNoIndex16); if (prev_idx < curr_idx) { break; @@ -1804,6 +1809,12 @@ bool DexFileVerifier::CheckInterProtoIdItem() { prev_it.Next(); curr_it.Next(); } + if (!curr_it.HasNext()) { + // Either a duplicate ProtoId or a ProtoId with a shorter argument list follows + // a ProtoId with a longer one. Both cases are forbidden by the specification. + ErrorStringPrintf("Out-of-order proto_id arguments"); + return false; + } } } @@ -2358,7 +2369,8 @@ static bool CheckAtMostOneOfPublicProtectedPrivate(uint32_t flags) { static std::string GetStringOrError(const uint8_t* const begin, const DexFile::Header* const header, uint32_t string_idx) { - if (header->string_ids_size_ < string_idx) { + // The `string_idx` is not guaranteed to be valid yet. 
+ if (header->string_ids_size_ <= string_idx) { return "(error)"; } @@ -2375,9 +2387,11 @@ static std::string GetStringOrError(const uint8_t* const begin, static std::string GetClassOrError(const uint8_t* const begin, const DexFile::Header* const header, uint32_t class_idx) { - if (header->type_ids_size_ < class_idx) { - return "(error)"; - } + // The `class_idx` is either `FieldId::class_idx_` or `MethodId::class_idx_` and + // it has already been checked in `DexFileVerifier::CheckClassDataItemField()` + // or `DexFileVerifier::CheckClassDataItemMethod()`, respectively, to match + // a valid defining class. + CHECK_LT(class_idx, header->type_ids_size_); const DexFile::TypeId* type_id = reinterpret_cast(begin + header->type_ids_off_) + class_idx; @@ -2390,9 +2404,8 @@ static std::string GetClassOrError(const uint8_t* const begin, static std::string GetFieldDescriptionOrError(const uint8_t* const begin, const DexFile::Header* const header, uint32_t idx) { - if (header->field_ids_size_ < idx) { - return "(error)"; - } + // The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemField()`. + CHECK_LT(idx, header->field_ids_size_); const DexFile::FieldId* field_id = reinterpret_cast(begin + header->field_ids_off_) + idx; @@ -2408,9 +2421,8 @@ static std::string GetFieldDescriptionOrError(const uint8_t* const begin, static std::string GetMethodDescriptionOrError(const uint8_t* const begin, const DexFile::Header* const header, uint32_t idx) { - if (header->method_ids_size_ < idx) { - return "(error)"; - } + // The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemMethod()`. 
+ CHECK_LT(idx, header->method_ids_size_); const DexFile::MethodId* method_id = reinterpret_cast(begin + header->method_ids_off_) + idx; @@ -2608,7 +2620,13 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index, *error_msg = StringPrintf("Constructor %" PRIu32 "(%s) is not flagged correctly wrt/ static.", method_index, GetMethodDescriptionOrError(begin_, header_, method_index).c_str()); - return false; + if (header_->GetVersion() >= DexFile::kDefaultMethodsVersion) { + return false; + } else { + // Allow in older versions, but warn. + LOG(WARNING) << "This dex file is invalid and will be rejected in the future. Error is: " + << *error_msg; + } } } // Check that static and private methods, as well as constructors, are in the direct methods list, @@ -2662,7 +2680,13 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index, *error_msg = StringPrintf("Constructor %u(%s) must not be abstract or native", method_index, GetMethodDescriptionOrError(begin_, header_, method_index).c_str()); - return false; + if (header_->GetVersion() >= DexFile::kDefaultMethodsVersion) { + return false; + } else { + // Allow in older versions, but warn. + LOG(WARNING) << "This dex file is invalid and will be rejected in the future. Error is: " + << *error_msg; + } } if ((method_access_flags & kAccAbstract) != 0) { // Abstract methods are not allowed to have the following flags. diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h index be0e6d83f..90409db44 100644 --- a/runtime/dex_file_verifier.h +++ b/runtime/dex_file_verifier.h @@ -49,6 +49,8 @@ class DexFileVerifier { // Checks whether the offset is zero (when size is zero) or that the offset falls within the area // claimed by the file. bool CheckValidOffsetAndSize(uint32_t offset, uint32_t size, size_t alignment, const char* label); + // Checks whether the size is less than the limit. 
+ bool CheckSizeLimit(uint32_t size, uint32_t limit, const char* label); bool CheckIndex(uint32_t field, uint32_t limit, const char* label); bool CheckHeader(); diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc index 344d186ad..3741c1e76 100644 --- a/runtime/dex_file_verifier_test.cc +++ b/runtime/dex_file_verifier_test.cc @@ -57,7 +57,14 @@ static const uint8_t kBase64Map[256] = { 255, 255, 255, 255 }; -static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) { +// Make the Dex file version 37. +static void MakeDexVersion37(DexFile* dex_file) { + size_t offset = OFFSETOF_MEMBER(DexFile::Header, magic_) + 6; + CHECK_EQ(*(dex_file->Begin() + offset), '5'); + *(const_cast(dex_file->Begin()) + offset) = '7'; +} + +static inline std::unique_ptr DecodeBase64(const char* src, size_t* dst_size) { std::vector tmp; uint32_t t = 0, y = 0; int g = 3; @@ -100,7 +107,7 @@ static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) { *dst_size = 0; } std::copy(tmp.begin(), tmp.end(), dst.get()); - return dst.release(); + return dst; } static void FixUpChecksum(uint8_t* dex_file) { @@ -113,25 +120,18 @@ static void FixUpChecksum(uint8_t* dex_file) { header->checksum_ = adler_checksum; } -// Custom deleter. Necessary to clean up the memory we use (to be able to mutate). -struct DexFileDeleter { - void operator()(DexFile* in) { - if (in != nullptr) { - delete[] in->Begin(); - delete in; - } - } -}; - -using DexFileUniquePtr = std::unique_ptr; - class DexFileVerifierTest : public CommonRuntimeTest { protected: void VerifyModification(const char* dex_file_base64_content, const char* location, std::function f, const char* expected_error) { - DexFileUniquePtr dex_file(WrapAsDexFile(dex_file_base64_content)); + size_t length; + std::unique_ptr dex_bytes = DecodeBase64(dex_file_base64_content, &length); + CHECK(dex_bytes != nullptr); + // Note: `dex_file` will be destroyed before `dex_bytes`. 
+ std::unique_ptr dex_file( + new DexFile(dex_bytes.get(), length, "tmp", 0, nullptr, nullptr)); f(dex_file.get()); FixUpChecksum(const_cast(dex_file->Begin())); @@ -150,15 +150,6 @@ class DexFileVerifierTest : public CommonRuntimeTest { } } } - - private: - static DexFile* WrapAsDexFile(const char* dex_file_content_in_base_64) { - // Decode base64. - size_t length; - uint8_t* dex_bytes = DecodeBase64(dex_file_content_in_base_64, &length); - CHECK(dex_bytes != nullptr); - return new DexFile(dex_bytes, length, "tmp", 0, nullptr, nullptr); - } }; static std::unique_ptr OpenDexFileBase64(const char* base64, @@ -290,7 +281,9 @@ static const char kMethodFlagsTestDex[] = // Find the method data for the first method with the given name (from class 0). Note: the pointer // is to the access flags, so that the caller doesn't have to handle the leb128-encoded method-index // delta. -static const uint8_t* FindMethodData(const DexFile* dex_file, const char* name) { +static const uint8_t* FindMethodData(const DexFile* dex_file, + const char* name, + /*out*/ uint32_t* method_idx = nullptr) { const DexFile::ClassDef& class_def = dex_file->GetClassDef(0); const uint8_t* class_data = dex_file->GetClassData(class_def); @@ -316,6 +309,9 @@ static const uint8_t* FindMethodData(const DexFile* dex_file, const char* name) const DexFile::StringId& string_id = dex_file->GetStringId(name_index); const char* str = dex_file->GetStringData(string_id); if (strcmp(name, str) == 0) { + if (method_idx != nullptr) { + *method_idx = method_index; + } DecodeUnsignedLeb128(&trailing); return trailing; } @@ -449,6 +445,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsConstructors) { kMethodFlagsTestDex, "method_flags_constructor_native_nocode", [&](DexFile* dex_file) { + MakeDexVersion37(dex_file); ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized); ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized); @@ -461,6 +458,7 @@ TEST_F(DexFileVerifierTest, 
MethodAccessFlagsConstructors) { kMethodFlagsTestDex, "method_flags_constructor_abstract_nocode", [&](DexFile* dex_file) { + MakeDexVersion37(dex_file); ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized); ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized); @@ -521,6 +519,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsConstructors) { kMethodFlagsTestDex, "init_not_allowed_flags", [&](DexFile* dex_file) { + MakeDexVersion37(dex_file); ApplyMaskToMethodFlags(dex_file, "foo", ~kAccDeclaredSynchronized); ApplyMaskToMethodFlags(dex_file, "bar", ~kAccDeclaredSynchronized); @@ -683,6 +682,22 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsIgnoredOK) { } } +TEST_F(DexFileVerifierTest, B28552165) { + // Regression test for bad error string retrieval in different situations. + // Using invalid access flags to trigger the error. + VerifyModification( + kMethodFlagsTestDex, + "b28552165", + [](DexFile* dex_file) { + OrMaskToMethodFlags(dex_file, "foo", kAccPublic | kAccProtected); + uint32_t method_idx; + FindMethodData(dex_file, "foo", &method_idx); + auto* method_id = const_cast(&dex_file->GetMethodId(method_idx)); + method_id->name_idx_ = dex_file->NumStringIds(); + }, + "Method may have only one of public/protected/private, LMethodFlags;.(error)"); +} + // Set of dex files for interface method tests. As it's not as easy to mutate method names, it's // just easier to break up bad cases. @@ -725,13 +740,6 @@ static uint32_t ApplyMaskShifted(uint32_t src_value, uint32_t mask) { return result; } -// Make the Dex file version 37. 
-static void MakeDexVersion37(DexFile* dex_file) { - size_t offset = OFFSETOF_MEMBER(DexFile::Header, magic_) + 6; - CHECK_EQ(*(dex_file->Begin() + offset), '5'); - *(const_cast(dex_file->Begin()) + offset) = '7'; -} - TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) { VerifyModification( kMethodFlagsInterface, @@ -1436,4 +1444,81 @@ TEST_F(DexFileVerifierTest, SectionAlignment) { } } +// Generated from +// +// .class LOverloading; +// +// .super Ljava/lang/Object; +// +// .method public static foo()V +// .registers 1 +// return-void +// .end method +// +// .method public static foo(I)V +// .registers 1 +// return-void +// .end method +static const char kProtoOrderingTestDex[] = + "ZGV4CjAzNQA1L+ABE6voQ9Lr4Ci//efB53oGnDr5PinsAQAAcAAAAHhWNBIAAAAAAAAAAFgBAAAG" + "AAAAcAAAAAQAAACIAAAAAgAAAJgAAAAAAAAAAAAAAAIAAACwAAAAAQAAAMAAAAAMAQAA4AAAAOAA" + "AADjAAAA8gAAAAYBAAAJAQAADQEAAAAAAAABAAAAAgAAAAMAAAADAAAAAwAAAAAAAAAEAAAAAwAA" + "ABQBAAABAAAABQAAAAEAAQAFAAAAAQAAAAAAAAACAAAAAAAAAP////8AAAAASgEAAAAAAAABSQAN" + "TE92ZXJsb2FkaW5nOwASTGphdmEvbGFuZy9PYmplY3Q7AAFWAAJWSQADZm9vAAAAAQAAAAAAAAAA" + "AAAAAAAAAAEAAAAAAAAAAAAAAAEAAAAOAAAAAQABAAAAAAAAAAAAAQAAAA4AAAACAAAJpAIBCbgC" + "AAAMAAAAAAAAAAEAAAAAAAAAAQAAAAYAAABwAAAAAgAAAAQAAACIAAAAAwAAAAIAAACYAAAABQAA" + "AAIAAACwAAAABgAAAAEAAADAAAAAAiAAAAYAAADgAAAAARAAAAEAAAAUAQAAAxAAAAIAAAAcAQAA" + "ASAAAAIAAAAkAQAAACAAAAEAAABKAQAAABAAAAEAAABYAQAA"; + +TEST_F(DexFileVerifierTest, ProtoOrdering) { + { + // The input dex file should be good before modification. + ScratchFile tmp; + std::string error_msg; + std::unique_ptr raw(OpenDexFileBase64(kProtoOrderingTestDex, + tmp.GetFilename().c_str(), + &error_msg)); + ASSERT_TRUE(raw.get() != nullptr) << error_msg; + } + + // Modify the order of the ProtoIds for two overloads of "foo" with the + // same return type and one having longer parameter list than the other. 
+ for (size_t i = 0; i != 2; ++i) { + VerifyModification( + kProtoOrderingTestDex, + "proto_ordering", + [i](DexFile* dex_file) { + uint32_t method_idx; + const uint8_t* data = FindMethodData(dex_file, "foo", &method_idx); + CHECK(data != nullptr); + // There should be 2 methods called "foo". + CHECK_LT(method_idx + 1u, dex_file->NumMethodIds()); + CHECK_EQ(dex_file->GetMethodId(method_idx).name_idx_, + dex_file->GetMethodId(method_idx + 1).name_idx_); + CHECK_EQ(dex_file->GetMethodId(method_idx).proto_idx_ + 1u, + dex_file->GetMethodId(method_idx + 1).proto_idx_); + // Their return types should be the same. + uint32_t proto1_idx = dex_file->GetMethodId(method_idx).proto_idx_; + const DexFile::ProtoId& proto1 = dex_file->GetProtoId(proto1_idx); + const DexFile::ProtoId& proto2 = dex_file->GetProtoId(proto1_idx + 1u); + CHECK_EQ(proto1.return_type_idx_, proto2.return_type_idx_); + // And the first should not have any parameters while the second should have some. + CHECK(!DexFileParameterIterator(*dex_file, proto1).HasNext()); + CHECK(DexFileParameterIterator(*dex_file, proto2).HasNext()); + if (i == 0) { + // Swap the proto parameters and shorties to break the ordering. + std::swap(const_cast(proto1.parameters_off_), + const_cast(proto2.parameters_off_)); + std::swap(const_cast(proto1.shorty_idx_), + const_cast(proto2.shorty_idx_)); + } else { + // Copy the proto parameters and shorty to create duplicate proto id. 
+ const_cast(proto1.parameters_off_) = proto2.parameters_off_; + const_cast(proto1.shorty_idx_) = proto2.shorty_idx_; + } + }, + "Out-of-order proto_id arguments"); + } +} + } // namespace art diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index 16fbfaad3..fc6257302 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -514,12 +514,18 @@ inline ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object** this_ CHECK(self->IsExceptionPending()); return nullptr; } else if (!method_reference_class->IsInterface()) { - // It is not an interface. - mirror::Class* super_class = referring_class->GetSuperClass(); + // It is not an interface. If the referring class is in the class hierarchy of the + // referenced class in the bytecode, we use its super class. Otherwise, we throw + // a NoSuchMethodError. + mirror::Class* super_class = nullptr; + if (method_reference_class->IsAssignableFrom(referring_class)) { + super_class = referring_class->GetSuperClass(); + } uint16_t vtable_index = resolved_method->GetMethodIndex(); if (access_check) { // Check existence of super class. - if (super_class == nullptr || !super_class->HasVTable() || + if (super_class == nullptr || + !super_class->HasVTable() || vtable_index >= static_cast(super_class->GetVTableLength())) { // Behavior to agree with that of the verifier. ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), @@ -693,8 +699,13 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx, mirror::Object* this_objec // Need to do full type resolution... return nullptr; } else if (!method_reference_class->IsInterface()) { - // It is not an interface. - mirror::Class* super_class = referrer->GetDeclaringClass()->GetSuperClass(); + // It is not an interface. If the referring class is in the class hierarchy of the + // referenced class in the bytecode, we use its super class. 
Otherwise, we cannot + // resolve the method. + if (!method_reference_class->IsAssignableFrom(referring_class)) { + return nullptr; + } + mirror::Class* super_class = referring_class->GetSuperClass(); if (resolved_method->GetMethodIndex() >= super_class->GetVTableLength()) { // The super class does not have the method. return nullptr; diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h index 4e01d8031..f3a0d2f3e 100644 --- a/runtime/entrypoints/quick/quick_default_externs.h +++ b/runtime/entrypoints/quick/quick_default_externs.h @@ -77,6 +77,10 @@ extern "C" void art_quick_handle_fill_data(void*, void*); extern "C" void art_quick_lock_object(art::mirror::Object*); extern "C" void art_quick_unlock_object(art::mirror::Object*); +// Lock entrypoints that do not inline any behavior (e.g., thin-locks). +extern "C" void art_quick_lock_object_no_inline(art::mirror::Object*); +extern "C" void art_quick_unlock_object_no_inline(art::mirror::Object*); + // Math entrypoints. extern "C" int64_t art_quick_d2l(double); extern "C" int64_t art_quick_f2l(float); diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h new file mode 100644 index 000000000..5dafa8b59 --- /dev/null +++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_INIT_ENTRYPOINTS_H_ +#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_INIT_ENTRYPOINTS_H_ + +#include "base/logging.h" +#include "entrypoints/jni/jni_entrypoints.h" +#include "entrypoints/runtime_asm_entrypoints.h" +#include "quick_alloc_entrypoints.h" +#include "quick_default_externs.h" +#include "quick_entrypoints.h" + +namespace art { + +void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { + // JNI + jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; + + // Alloc + ResetQuickAllocEntryPoints(qpoints); + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; + qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; + qpoints->pInitializeType = art_quick_initialize_type; + qpoints->pResolveString = art_quick_resolve_string; + + // Field + qpoints->pSet8Instance = art_quick_set8_instance; + qpoints->pSet8Static = art_quick_set8_static; + qpoints->pSet16Instance = art_quick_set16_instance; + qpoints->pSet16Static = art_quick_set16_static; + qpoints->pSet32Instance = art_quick_set32_instance; + qpoints->pSet32Static = art_quick_set32_static; + qpoints->pSet64Instance = art_quick_set64_instance; + qpoints->pSet64Static = art_quick_set64_static; + qpoints->pSetObjInstance = art_quick_set_obj_instance; + qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGetByteInstance = art_quick_get_byte_instance; + qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; + qpoints->pGetShortInstance = art_quick_get_short_instance; + qpoints->pGetCharInstance = art_quick_get_char_instance; + qpoints->pGet32Instance = art_quick_get32_instance; + qpoints->pGet64Instance = art_quick_get64_instance; + qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGetByteStatic = art_quick_get_byte_static; + 
qpoints->pGetBooleanStatic = art_quick_get_boolean_static; + qpoints->pGetShortStatic = art_quick_get_short_static; + qpoints->pGetCharStatic = art_quick_get_char_static; + qpoints->pGet32Static = art_quick_get32_static; + qpoints->pGet64Static = art_quick_get64_static; + qpoints->pGetObjStatic = art_quick_get_obj_static; + + // Array + qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; + qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; + qpoints->pAputObject = art_quick_aput_obj; + qpoints->pHandleFillArrayData = art_quick_handle_fill_data; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; + + // Locks + if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) { + qpoints->pLockObject = art_quick_lock_object_no_inline; + qpoints->pUnlockObject = art_quick_unlock_object_no_inline; + } else { + qpoints->pLockObject = art_quick_lock_object; + qpoints->pUnlockObject = art_quick_unlock_object; + } + + // Invocation + qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; + qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; + qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; + qpoints->pInvokeDirectTrampolineWithAccessCheck = + art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = + art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = + art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = + 
art_quick_invoke_super_trampoline_with_access_check; + qpoints->pInvokeVirtualTrampolineWithAccessCheck = + art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pTestSuspend = art_quick_test_suspend; + + // Throws + qpoints->pDeliverException = art_quick_deliver_exception; + qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; + qpoints->pThrowDivZero = art_quick_throw_div_zero; + qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; + qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; + qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; + + // Deoptimize + qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_INIT_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index da6af724a..e9cdbb743 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -2174,7 +2174,8 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT imt_index % mirror::Class::kImtSize, sizeof(void*)); if (LIKELY(conflict_method->IsRuntimeMethod())) { ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*)); - method = current_table->Lookup(interface_method); + DCHECK(current_table != nullptr); + method = current_table->Lookup(interface_method, sizeof(void*)); } else { // It seems we aren't really a conflict method! 
method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*)); @@ -2225,34 +2226,13 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry( imt_index % mirror::Class::kImtSize, sizeof(void*)); if (conflict_method->IsRuntimeMethod()) { - ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*)); - Runtime* runtime = Runtime::Current(); - LinearAlloc* linear_alloc = (cls->GetClassLoader() == nullptr) - ? runtime->GetLinearAlloc() - : cls->GetClassLoader()->GetAllocator(); - bool is_new_entry = (conflict_method == runtime->GetImtConflictMethod()); - - // Create a new entry if the existing one is the shared conflict method. - ArtMethod* new_conflict_method = is_new_entry - ? runtime->CreateImtConflictMethod(linear_alloc) - : conflict_method; - - // Allocate a new table. Note that we will leak this table at the next conflict, - // but that's a tradeoff compared to making the table fixed size. - void* data = linear_alloc->Alloc( - self, ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table)); - CHECK(data != nullptr) << "Out of memory"; - ImtConflictTable* new_table = new (data) ImtConflictTable( - current_table, interface_method, method); - - // Do a fence to ensure threads see the data in the table before it is assigned - // to the conlict method. - // Note that there is a race in the presence of multiple threads and we may leak - // memory from the LinearAlloc, but that's a tradeoff compared to using - // atomic operations. - QuasiAtomic::ThreadFenceRelease(); - new_conflict_method->SetImtConflictTable(new_table); - if (is_new_entry) { + ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable( + cls.Get(), + conflict_method, + interface_method, + method, + /*force_new_conflict_method*/false); + if (new_conflict_method != conflict_method) { // Update the IMT if we create a new conflict method. 
No fence needed here, as the // data is consistent. cls->SetEmbeddedImTableEntry(imt_index % mirror::Class::kImtSize, diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index c2f772f87..cdd5f2e12 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -119,6 +119,8 @@ static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000; // Dump the rosalloc stats on SIGQUIT. static constexpr bool kDumpRosAllocStatsOnSigQuit = false; +static constexpr size_t kNativeAllocationHistogramBuckets = 16; + static inline bool CareAboutPauseTimes() { return Runtime::Current()->InJankPerceptibleProcessState(); } @@ -186,6 +188,11 @@ Heap::Heap(size_t initial_size, total_objects_freed_ever_(0), num_bytes_allocated_(0), native_bytes_allocated_(0), + native_histogram_lock_("Native allocation lock"), + native_allocation_histogram_("Native allocation sizes", + 1U, + kNativeAllocationHistogramBuckets), + native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets), num_bytes_freed_revoke_(0), verify_missing_card_marks_(false), verify_system_weaks_(false), @@ -304,7 +311,7 @@ Heap::Heap(size_t initial_size, const OatHeader& boot_oat_header = boot_oat_file->GetOatHeader(); const char* boot_classpath = - boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPath); + boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey); if (boot_classpath == nullptr) { continue; } @@ -1185,6 +1192,20 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) { rosalloc_space_->DumpStats(os); } + { + MutexLock mu(Thread::Current(), native_histogram_lock_); + if (native_allocation_histogram_.SampleSize() > 0u) { + os << "Histogram of native allocation "; + native_allocation_histogram_.DumpBins(os); + os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n"; + } + if (native_free_histogram_.SampleSize() > 0u) { + os << "Histogram of native free "; + native_free_histogram_.DumpBins(os); + os << " bucket size " << 
native_free_histogram_.BucketWidth() << "\n"; + } + } + BaseMutex::DumpAll(os); } @@ -2687,8 +2708,8 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, concurrent_start_bytes_ = std::numeric_limits::max(); } - if ((gc_type == collector::kGcTypeFull) && runtime->UseJit()) { - // It's time to clear all inline caches, in case some classes can be unloaded. + // It's time to clear all inline caches, in case some classes can be unloaded. + if ((gc_type == collector::kGcTypeFull) && (runtime->GetJit() != nullptr)) { runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self); } @@ -3848,6 +3869,10 @@ void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) { void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) { Thread* self = ThreadForEnv(env); + { + MutexLock mu(self, native_histogram_lock_); + native_allocation_histogram_.AddValue(bytes); + } if (native_need_to_run_finalization_) { RunFinalization(env, kNativeAllocationFinalizeTimeout); UpdateMaxNativeFootprint(); @@ -3892,6 +3917,10 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) { void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) { size_t expected_size; + { + MutexLock mu(Thread::Current(), native_histogram_lock_); + native_free_histogram_.AddValue(bytes); + } do { expected_size = native_bytes_allocated_.LoadRelaxed(); if (UNLIKELY(bytes > expected_size)) { diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index fada1a221..2a1a4a17a 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -241,9 +241,9 @@ class Heap { SHARED_REQUIRES(Locks::mutator_lock_); void RegisterNativeAllocation(JNIEnv* env, size_t bytes) - REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_); void RegisterNativeFree(JNIEnv* env, size_t bytes) - REQUIRES(!*gc_complete_lock_, !*pending_task_lock_); + REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_); // Change the 
allocator, updates entrypoints. void ChangeAllocator(AllocatorType allocator) @@ -532,7 +532,7 @@ class Heap { space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const SHARED_REQUIRES(Locks::mutator_lock_); - void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_); + void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_); // Do a pending collector transition. void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_); @@ -654,7 +654,8 @@ class Heap { std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; // GC performance measuring - void DumpGcPerformanceInfo(std::ostream& os) REQUIRES(!*gc_complete_lock_); + void DumpGcPerformanceInfo(std::ostream& os) + REQUIRES(!*gc_complete_lock_, !native_histogram_lock_); void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_); // Thread pool. @@ -1156,6 +1157,11 @@ class Heap { // Bytes which are allocated and managed by native code but still need to be accounted for. Atomic native_bytes_allocated_; + // Native allocation stats. + Mutex native_histogram_lock_; + Histogram native_allocation_histogram_; + Histogram native_free_histogram_; + // Number of bytes freed by thread local buffer revokes. This will // cancel out the ahead-of-time bulk counting of bytes allocated in // rosalloc thread-local buffers. 
It is temporarily accumulated diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index d386c7435..78c570fa9 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -880,7 +880,7 @@ class FixupObjectVisitor : public FixupVisitor { class ForwardObjectAdapter { public: - ALWAYS_INLINE ForwardObjectAdapter(const FixupVisitor* visitor) : visitor_(visitor) {} + ALWAYS_INLINE explicit ForwardObjectAdapter(const FixupVisitor* visitor) : visitor_(visitor) {} template ALWAYS_INLINE T* operator()(T* src) const { @@ -893,7 +893,7 @@ class ForwardObjectAdapter { class ForwardCodeAdapter { public: - ALWAYS_INLINE ForwardCodeAdapter(const FixupVisitor* visitor) + ALWAYS_INLINE explicit ForwardCodeAdapter(const FixupVisitor* visitor) : visitor_(visitor) {} template @@ -914,10 +914,26 @@ class FixupArtMethodVisitor : public FixupVisitor, public ArtMethodVisitor { pointer_size_(pointer_size) {} virtual void Visit(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS { - if (fixup_heap_objects_) { - method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_); + // TODO: Separate visitor for runtime vs normal methods. 
+ if (UNLIKELY(method->IsRuntimeMethod())) { + ImtConflictTable* table = method->GetImtConflictTable(pointer_size_); + if (table != nullptr) { + ImtConflictTable* new_table = ForwardObject(table); + if (table != new_table) { + method->SetImtConflictTable(new_table, pointer_size_); + } + } + const void* old_code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_); + const void* new_code = ForwardCode(old_code); + if (old_code != new_code) { + method->SetEntryPointFromQuickCompiledCodePtrSize(new_code, pointer_size_); + } + } else { + if (fixup_heap_objects_) { + method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_); + } + method->UpdateEntrypoints(ForwardCodeAdapter(this), pointer_size_); } - method->UpdateEntrypoints(ForwardCodeAdapter(this), pointer_size_); } private: @@ -1018,6 +1034,7 @@ static bool RelocateInPlace(ImageHeader& image_header, const ImageSection& objects_section = image_header.GetImageSection(ImageHeader::kSectionObjects); uintptr_t objects_begin = reinterpret_cast(target_base + objects_section.Offset()); uintptr_t objects_end = reinterpret_cast(target_base + objects_section.End()); + FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat); if (fixup_image) { // Two pass approach, fix up all classes first, then fix up non class-objects. // The visited bitmap is used to ensure that pointer arrays are not forwarded twice. @@ -1037,7 +1054,6 @@ static bool RelocateInPlace(ImageHeader& image_header, ScopedObjectAccess soa(Thread::Current()); timing.NewTiming("Fixup objects"); bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor); - FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat); // Fixup image roots. 
CHECK(app_image.InSource(reinterpret_cast( image_header.GetImageRoots()))); @@ -1104,19 +1120,18 @@ static bool RelocateInPlace(ImageHeader& image_header, boot_oat, app_image, app_oat); - image_header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &method_visitor, - target_base, - pointer_size); + image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size); } if (fixup_image) { { // Only touches objects in the app image, no need for mutator lock. TimingLogger::ScopedTiming timing("Fixup fields", &logger); FixupArtFieldVisitor field_visitor(boot_image, boot_oat, app_image, app_oat); - image_header.GetImageSection(ImageHeader::kSectionArtFields).VisitPackedArtFields( - &field_visitor, - target_base); + image_header.VisitPackedArtFields(&field_visitor, target_base); + } + { + TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger); + image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size); } // In the app image case, the image methods are actually in the boot image. image_header.RelocateImageMethods(boot_image.Delta()); diff --git a/runtime/globals.h b/runtime/globals.h index e7ea6f378..477cbdf5d 100644 --- a/runtime/globals.h +++ b/runtime/globals.h @@ -51,11 +51,31 @@ static constexpr bool kIsDebugBuild = false; static constexpr bool kIsDebugBuild = true; #endif -// Whether or not this is a target (vs host) build. Useful in conditionals where ART_TARGET isn't. +// ART_TARGET - Defined for target builds of ART. +// ART_TARGET_LINUX - Defined for target Linux builds of ART. +// ART_TARGET_ANDROID - Defined for target Android builds of ART. +// Note: Either ART_TARGET_LINUX or ART_TARGET_ANDROID need to be set when ART_TARGET is set. +// Note: When ART_TARGET_LINUX is defined mem_map.h will not be using Ashmem for memory mappings +// (usually only available on Android kernels). #if defined(ART_TARGET) +// Useful in conditionals where ART_TARGET isn't. 
static constexpr bool kIsTargetBuild = true; +#if defined(ART_TARGET_LINUX) +static constexpr bool kIsTargetLinux = true; +#elif defined(ART_TARGET_ANDROID) +static constexpr bool kIsTargetLinux = false; +#else +#error "Either ART_TARGET_LINUX or ART_TARGET_ANDROID needs to be defined for target builds." +#endif #else static constexpr bool kIsTargetBuild = false; +#if defined(ART_TARGET_LINUX) +#error "ART_TARGET_LINUX defined for host build." +#elif defined(ART_TARGET_ANDROID) +#error "ART_TARGET_ANDROID defined for host build." +#else +static constexpr bool kIsTargetLinux = false; +#endif #endif // Garbage collector constants. diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc index 3885c605b..989539516 100644 --- a/runtime/hprof/hprof.cc +++ b/runtime/hprof/hprof.cc @@ -505,6 +505,7 @@ class Hprof : public SingleRootVisitor { // Walk the roots and the heap. output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime); + simple_roots_.clear(); runtime->VisitRoots(this); runtime->VisitImageRoots(this); runtime->GetHeap()->VisitObjectsPaused(VisitObjectCallback, this); @@ -884,6 +885,14 @@ class Hprof : public SingleRootVisitor { gc::EqAllocRecordTypesPtr> frames_; std::unordered_map allocation_records_; + // Set used to keep track of what simple root records we have already + // emitted, to avoid emitting duplicate entries. The simple root records are + // those that contain no other information than the root type and the object + // id. A pair of root type and object id is packed into a uint64_t, with + // the root type in the upper 32 bits and the object id in the lower 32 + // bits. 
+ std::unordered_set simple_roots_; + friend class GcRootVisitor; DISALLOW_COPY_AND_ASSIGN(Hprof); }; @@ -962,10 +971,14 @@ void Hprof::MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeap case HPROF_ROOT_MONITOR_USED: case HPROF_ROOT_INTERNED_STRING: case HPROF_ROOT_DEBUGGER: - case HPROF_ROOT_VM_INTERNAL: - __ AddU1(heap_tag); - __ AddObjectId(obj); + case HPROF_ROOT_VM_INTERNAL: { + uint64_t key = (static_cast(heap_tag) << 32) | PointerToLowMemUInt32(obj); + if (simple_roots_.insert(key).second) { + __ AddU1(heap_tag); + __ AddObjectId(obj); + } break; + } // ID: object ID // ID: JNI global ref ID diff --git a/runtime/image-inl.h b/runtime/image-inl.h index e3307d87b..ea75a622c 100644 --- a/runtime/image-inl.h +++ b/runtime/image-inl.h @@ -19,6 +19,8 @@ #include "image.h" +#include "art_method.h" + namespace art { template @@ -42,6 +44,20 @@ inline mirror::ObjectArray* ImageHeader::GetImageRoots() const { return image_roots; } +template +inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor, + uint8_t* base, + size_t pointer_size) const { + const ImageSection& section = GetImageSection(kSectionIMTConflictTables); + for (size_t pos = 0; pos < section.Size(); ) { + auto* table = reinterpret_cast(base + section.Offset() + pos); + table->Visit([&visitor](const std::pair& methods) { + return std::make_pair(visitor(methods.first), visitor(methods.second)); + }, pointer_size); + pos += table->ComputeSize(pointer_size); + } +} + } // namespace art #endif // ART_RUNTIME_IMAGE_INL_H_ diff --git a/runtime/image.cc b/runtime/image.cc index 1f54e3e6a..a9552c27d 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -24,7 +24,7 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '7', '\0' }; +const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '9', '\0' }; ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, @@ -147,9 +147,10 
@@ std::ostream& operator<<(std::ostream& os, const ImageSection& section) { return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End(); } -void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const { - for (size_t pos = 0; pos < Size(); ) { - auto* array = reinterpret_cast*>(base + Offset() + pos); +void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const { + const ImageSection& fields = GetFieldsSection(); + for (size_t pos = 0; pos < fields.Size(); ) { + auto* array = reinterpret_cast*>(base + fields.Offset() + pos); for (size_t i = 0; i < array->size(); ++i) { visitor->Visit(&array->At(i, sizeof(ArtField))); } @@ -157,18 +158,25 @@ void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) } } -void ImageSection::VisitPackedArtMethods(ArtMethodVisitor* visitor, - uint8_t* base, - size_t pointer_size) const { +void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor, + uint8_t* base, + size_t pointer_size) const { const size_t method_alignment = ArtMethod::Alignment(pointer_size); const size_t method_size = ArtMethod::Size(pointer_size); - for (size_t pos = 0; pos < Size(); ) { - auto* array = reinterpret_cast*>(base + Offset() + pos); + const ImageSection& methods = GetMethodsSection(); + for (size_t pos = 0; pos < methods.Size(); ) { + auto* array = reinterpret_cast*>(base + methods.Offset() + pos); for (size_t i = 0; i < array->size(); ++i) { visitor->Visit(&array->At(i, method_size, method_alignment)); } pos += array->ComputeSize(array->size(), method_size, method_alignment); } + const ImageSection& runtime_methods = GetRuntimeMethodsSection(); + for (size_t pos = 0; pos < runtime_methods.Size(); ) { + auto* method = reinterpret_cast(base + runtime_methods.Offset() + pos); + visitor->Visit(method); + pos += method_size; + } } } // namespace art diff --git a/runtime/image.h b/runtime/image.h index 8e5dbad57..2ea9af772 100644 --- 
a/runtime/image.h +++ b/runtime/image.h @@ -64,12 +64,6 @@ class PACKED(4) ImageSection { return offset - offset_ < size_; } - // Visit ArtMethods in the section starting at base. - void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const; - - // Visit ArtMethods in the section starting at base. - void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const; - private: uint32_t offset_; uint32_t size_; @@ -200,6 +194,8 @@ class PACKED(4) ImageHeader { kSectionObjects, kSectionArtFields, kSectionArtMethods, + kSectionRuntimeMethods, + kSectionIMTConflictTables, kSectionDexCacheArrays, kSectionInternedStrings, kSectionClassTable, @@ -211,10 +207,19 @@ class PACKED(4) ImageHeader { void SetImageMethod(ImageMethod index, ArtMethod* method); const ImageSection& GetImageSection(ImageSections index) const; + const ImageSection& GetMethodsSection() const { return GetImageSection(kSectionArtMethods); } + const ImageSection& GetRuntimeMethodsSection() const { + return GetImageSection(kSectionRuntimeMethods); + } + + const ImageSection& GetFieldsSection() const { + return GetImageSection(ImageHeader::kSectionArtFields); + } + template mirror::Object* GetImageRoot(ImageRoot image_root) const SHARED_REQUIRES(Locks::mutator_lock_); @@ -265,6 +270,19 @@ class PACKED(4) ImageHeader { return boot_image_size_ != 0u; } + // Visit ArtMethods in the section starting at base. Includes runtime methods. + // TODO: Delete base parameter if it is always equal to GetImageBegin. + void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const; + + // Visit ArtMethods in the section starting at base. + // TODO: Delete base parameter if it is always equal to GetImageBegin. 
+ void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const; + + template + void VisitPackedImtConflictTables(const Visitor& visitor, + uint8_t* base, + size_t pointer_size) const; + private: static const uint8_t kImageMagic[4]; static const uint8_t kImageVersion[4]; diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index 34bc45857..61119f849 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -687,8 +687,7 @@ void Instrumentation::ResetQuickAllocEntryPoints() { } } -void Instrumentation::UpdateMethodsCode(ArtMethod* method, const void* quick_code) { - DCHECK(method->GetDeclaringClass()->IsResolved()); +void Instrumentation::UpdateMethodsCodeImpl(ArtMethod* method, const void* quick_code) { const void* new_quick_code; if (LIKELY(!instrumentation_stubs_installed_)) { new_quick_code = quick_code; @@ -710,6 +709,18 @@ void Instrumentation::UpdateMethodsCode(ArtMethod* method, const void* quick_cod UpdateEntrypoints(method, new_quick_code); } +void Instrumentation::UpdateMethodsCode(ArtMethod* method, const void* quick_code) { + DCHECK(method->GetDeclaringClass()->IsResolved()); + UpdateMethodsCodeImpl(method, quick_code); +} + +void Instrumentation::UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code) { + // When debugger attaches, we may update the entry points of all methods of a class + // to the interpreter bridge. A method's declaring class might not be in resolved + // state yet in that case. + UpdateMethodsCodeImpl(method, quick_code); +} + bool Instrumentation::AddDeoptimizedMethod(ArtMethod* method) { if (IsDeoptimizedMethod(method)) { // Already in the map. Return. 
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h index a4c3d4153..ce6ead453 100644 --- a/runtime/instrumentation.h +++ b/runtime/instrumentation.h @@ -227,6 +227,10 @@ class Instrumentation { void UpdateMethodsCode(ArtMethod* method, const void* quick_code) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_); + // Update the code of a method respecting any installed stubs from debugger. + void UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_); + // Get the quick code for the given method. More efficient than asking the class linker as it // will short-cut to GetCode if instrumentation and static method resolution stubs aren't // installed. @@ -493,6 +497,9 @@ class Instrumentation { SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_); bool IsDeoptimizedMethodsEmpty() const SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_); + void UpdateMethodsCodeImpl(ArtMethod* method, const void* quick_code) + SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_); + // Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code? 
bool instrumentation_stubs_installed_; diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 97dbe5d21..1d0e600e4 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -264,12 +264,12 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_ ShadowFrame& shadow_frame, JValue result_register); #endif -static JValue Execute(Thread* self, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, - JValue result_register) - SHARED_REQUIRES(Locks::mutator_lock_); - -static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item, - ShadowFrame& shadow_frame, JValue result_register) { +static inline JValue Execute( + Thread* self, + const DexFile::CodeItem* code_item, + ShadowFrame& shadow_frame, + JValue result_register, + bool stay_in_interpreter = false) SHARED_REQUIRES(Locks::mutator_lock_) { DCHECK(!shadow_frame.GetMethod()->IsAbstract()); DCHECK(!shadow_frame.GetMethod()->IsNative()); if (LIKELY(shadow_frame.GetDexPC() == 0)) { // Entering the method, but not via deoptimization. @@ -284,27 +284,34 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item, method, 0); } - jit::Jit* jit = Runtime::Current()->GetJit(); - if (jit != nullptr) { - jit->MethodEntered(self, shadow_frame.GetMethod()); - if (jit->CanInvokeCompiledCode(method)) { - JValue result; + if (!stay_in_interpreter) { + jit::Jit* jit = Runtime::Current()->GetJit(); + if (jit != nullptr) { + jit->MethodEntered(self, shadow_frame.GetMethod()); + if (jit->CanInvokeCompiledCode(method)) { + JValue result; - // Pop the shadow frame before calling into compiled code. - self->PopShadowFrame(); - ArtInterpreterToCompiledCodeBridge(self, nullptr, code_item, &shadow_frame, &result); - // Push the shadow frame back as the caller will expect it. - self->PushShadowFrame(&shadow_frame); + // Pop the shadow frame before calling into compiled code. 
+ self->PopShadowFrame(); + ArtInterpreterToCompiledCodeBridge(self, nullptr, code_item, &shadow_frame, &result); + // Push the shadow frame back as the caller will expect it. + self->PushShadowFrame(&shadow_frame); - return result; + return result; + } } } } shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self); + // Lock counting is a special version of accessibility checks, and for simplicity and + // reduction of template parameters, we gate it behind access-checks mode. + ArtMethod* method = shadow_frame.GetMethod(); + DCHECK(!method->SkipAccessChecks() || !method->MustCountLocks()); + bool transaction_active = Runtime::Current()->IsActiveTransaction(); - if (LIKELY(shadow_frame.GetMethod()->SkipAccessChecks())) { + if (LIKELY(method->SkipAccessChecks())) { // Enter the "without access check" interpreter. if (kInterpreterImplKind == kMterpImplKind) { if (transaction_active) { @@ -382,7 +389,8 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item, } void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receiver, - uint32_t* args, JValue* result) { + uint32_t* args, JValue* result, + bool stay_in_interpreter) { DCHECK_EQ(self, Thread::Current()); bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks(); if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) { @@ -457,7 +465,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive } } if (LIKELY(!method->IsNative())) { - JValue r = Execute(self, code_item, *shadow_frame, JValue()); + JValue r = Execute(self, code_item, *shadow_frame, JValue(), stay_in_interpreter); if (result != nullptr) { *result = r; } @@ -487,6 +495,10 @@ void EnterInterpreterFromDeoptimize(Thread* self, // Are we executing the first shadow frame? bool first = true; while (shadow_frame != nullptr) { + // We do not want to recover lock state for lock counting when deoptimizing. 
Currently, + // the compiler should not have compiled a method that failed structured-locking checks. + DCHECK(!shadow_frame->GetMethod()->MustCountLocks()); + self->SetTopOfShadowStack(shadow_frame); const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem(); const uint32_t dex_pc = shadow_frame->GetDexPC(); @@ -506,8 +518,24 @@ void EnterInterpreterFromDeoptimize(Thread* self, // instruction, as it already executed. // TODO: should be tested more once b/17586779 is fixed. const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]); - DCHECK(instr->IsInvoke()); - new_dex_pc = dex_pc + instr->SizeInCodeUnits(); + if (instr->IsInvoke()) { + new_dex_pc = dex_pc + instr->SizeInCodeUnits(); + } else if (instr->Opcode() == Instruction::NEW_INSTANCE) { + // It's possible to deoptimize at a NEW_INSTANCE dex instruciton that's for a + // java string, which is turned into a call into StringFactory.newEmptyString(); + if (kIsDebugBuild) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + mirror::Class* klass = class_linker->ResolveType( + instr->VRegB_21c(), shadow_frame->GetMethod()); + DCHECK(klass->IsStringClass()); + } + // Skip the dex instruction since we essentially come back from an invocation. + new_dex_pc = dex_pc + instr->SizeInCodeUnits(); + } else { + DCHECK(false) << "Unexpected instruction opcode " << instr->Opcode() + << " at dex_pc " << dex_pc + << " of method: " << PrettyMethod(shadow_frame->GetMethod(), false); + } } else { // Nothing to do, the dex_pc is the one at which the code requested // the deoptimization. diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h index 6353a9b7b..bf4bcff85 100644 --- a/runtime/interpreter/interpreter.h +++ b/runtime/interpreter/interpreter.h @@ -33,8 +33,11 @@ class Thread; namespace interpreter { // Called by ArtMethod::Invoke, shadow frames arguments are taken from the args array. 
+// The optional stay_in_interpreter parameter (false by default) can be used by clients to +// explicitly force interpretation in the remaining path that implements method invocation. extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, - mirror::Object* receiver, uint32_t* args, JValue* result) + mirror::Object* receiver, uint32_t* args, JValue* result, + bool stay_in_interpreter = false) SHARED_REQUIRES(Locks::mutator_lock_); // 'from_code' denotes whether the deoptimization was explicitly triggered by compiled code. diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index e5b89e2f9..69376fd7a 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -95,7 +95,9 @@ static inline void DoMonitorEnter(Thread* self, StackHandleScope<1> hs(self); Handle h_ref(hs.NewHandle(ref)); h_ref->MonitorEnter(self); - frame->GetLockCountData().AddMonitor(self, h_ref.Get()); + if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) { + frame->GetLockCountData().AddMonitor(self, h_ref.Get()); + } } template @@ -107,7 +109,19 @@ static inline void DoMonitorExit(Thread* self, StackHandleScope<1> hs(self); Handle h_ref(hs.NewHandle(ref)); h_ref->MonitorExit(self); - frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get()); + if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) { + frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get()); + } +} + +template +static inline bool DoMonitorCheckOnExit(Thread* self, ShadowFrame* frame) + NO_THREAD_SAFETY_ANALYSIS + REQUIRES(!Roles::uninterruptible_) { + if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) { + return frame->GetLockCountData().CheckAllMonitorsReleasedOrThrow(self); + } + return true; } void AbortTransactionF(Thread* self, const char* fmt, ...) 
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc index 13cfb9877..f03036b6a 100644 --- a/runtime/interpreter/interpreter_goto_table_impl.cc +++ b/runtime/interpreter/interpreter_goto_table_impl.cc @@ -104,8 +104,7 @@ namespace interpreter { } HANDLE_INSTRUCTION_END(); #define HANDLE_MONITOR_CHECKS() \ - if (!shadow_frame.GetLockCountData(). \ - CheckAllMonitorsReleasedOrThrow(self)) { \ + if (!DoMonitorCheckOnExit(self, &shadow_frame)) { \ HANDLE_PENDING_EXCEPTION(); \ } @@ -2584,7 +2583,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF instrumentation); if (found_dex_pc == DexFile::kDexNoIndex) { // Structured locking is to be enforced for abnormal termination, too. - shadow_frame.GetLockCountData().CheckAllMonitorsReleasedOrThrow(self); + DoMonitorCheckOnExit(self, &shadow_frame); return JValue(); /* Handled in caller. */ } else { int32_t displacement = static_cast(found_dex_pc) - static_cast(dex_pc); diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index 4323d4f42..18330babe 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -34,8 +34,7 @@ namespace interpreter { instrumentation); \ if (found_dex_pc == DexFile::kDexNoIndex) { \ /* Structured locking is to be enforced for abnormal termination, too. */ \ - shadow_frame.GetLockCountData(). \ - CheckAllMonitorsReleasedOrThrow(self); \ + DoMonitorCheckOnExit(self, &shadow_frame); \ if (interpret_one_instruction) { \ /* Signal mterp to return to caller */ \ shadow_frame.SetDexPC(DexFile::kDexNoIndex); \ @@ -57,8 +56,7 @@ namespace interpreter { } while (false) #define HANDLE_MONITOR_CHECKS() \ - if (!shadow_frame.GetLockCountData(). 
\ - CheckAllMonitorsReleasedOrThrow(self)) { \ + if (!DoMonitorCheckOnExit(self, &shadow_frame)) { \ HANDLE_PENDING_EXCEPTION(); \ } diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S index e46f9cde9..f78e1bc41 100644 --- a/runtime/interpreter/mterp/out/mterp_x86.S +++ b/runtime/interpreter/mterp/out/mterp_x86.S @@ -12985,6 +12985,7 @@ MterpCommonTakenBranch: * not-taken path. All Dalvik not-taken conditional branch offsets are 2. */ .L_check_not_taken_osr: + EXPORT_PC movl rSELF, %eax movl %eax, OUT_ARG0(%esp) leal OFF_FP_SHADOWFRAME(rFP), %ecx diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S index 62dce6e77..031cec823 100644 --- a/runtime/interpreter/mterp/out/mterp_x86_64.S +++ b/runtime/interpreter/mterp/out/mterp_x86_64.S @@ -11961,6 +11961,7 @@ MterpCommonTakenBranch: * not-taken path. All Dalvik not-taken conditional branch offsets are 2. */ .L_check_not_taken_osr: + EXPORT_PC movq rSELF, OUT_ARG0 leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1 movl $2, OUT_32_ARG2 diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S index fa03e78fe..e8c8ca8d7 100644 --- a/runtime/interpreter/mterp/x86/footer.S +++ b/runtime/interpreter/mterp/x86/footer.S @@ -234,6 +234,7 @@ MterpCommonTakenBranch: * not-taken path. All Dalvik not-taken conditional branch offsets are 2. */ .L_check_not_taken_osr: + EXPORT_PC movl rSELF, %eax movl %eax, OUT_ARG0(%esp) leal OFF_FP_SHADOWFRAME(rFP), %ecx diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S index 54d0cb1ce..f78f16357 100644 --- a/runtime/interpreter/mterp/x86_64/footer.S +++ b/runtime/interpreter/mterp/x86_64/footer.S @@ -213,6 +213,7 @@ MterpCommonTakenBranch: * not-taken path. All Dalvik not-taken conditional branch offsets are 2. 
*/ .L_check_not_taken_osr: + EXPORT_PC movq rSELF, OUT_ARG0 leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1 movl $$2, OUT_32_ARG2 diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc index a41fd4504..d983a9fa1 100644 --- a/runtime/java_vm_ext.cc +++ b/runtime/java_vm_ext.cc @@ -318,6 +318,7 @@ class JII { } JavaVMExt* raw_vm = reinterpret_cast(vm); delete raw_vm->GetRuntime(); + android::ResetNativeLoader(); return JNI_OK; } @@ -942,6 +943,11 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) { if (!Runtime::Create(options, ignore_unrecognized)) { return JNI_ERR; } + + // Initialize native loader. This step makes sure we have + // everything set up before we start using JNI. + android::InitializeNativeLoader(); + Runtime* runtime = Runtime::Current(); bool started = runtime->Start(); if (!started) { @@ -950,6 +956,7 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) { LOG(WARNING) << "CreateJavaVM failed"; return JNI_ERR; } + *p_env = Thread::Current()->GetJniEnv(); *p_vm = runtime->GetJavaVM(); return JNI_OK; diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index c36543f1f..b6b7eb184 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -51,7 +51,7 @@ bool Jit::generate_debug_info_ = false; JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) { auto* jit_options = new JitOptions; - jit_options->use_jit_ = options.GetOrDefault(RuntimeArgumentMap::UseJIT); + jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation); jit_options->code_cache_initial_capacity_ = options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity); @@ -102,14 +102,26 @@ JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& opt static_cast(1)); } + if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) { + jit_options->invoke_transition_weight_ = + *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight); + if 
(jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) { + LOG(FATAL) << "Invoke transition weight is above the warmup threshold."; + } else if (jit_options->invoke_transition_weight_ == 0) { + LOG(FATAL) << "Invoke transition weight cannot be 0."; + } + } else { + jit_options->invoke_transition_weight_ = std::max( + jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio, + static_cast(1));; + } + return jit_options; } bool Jit::ShouldUsePriorityThreadWeight() { - // TODO(calin): verify that IsSensitiveThread covers only the cases we are interested on. - // In particular if apps can set StrictMode policies for any of their threads, case in which - // we need to find another way to track sensitive threads. - return Runtime::Current()->InJankPerceptibleProcessState() && Thread::IsSensitiveThread(); + return Runtime::Current()->InJankPerceptibleProcessState() + && Thread::Current()->IsJitSensitiveThread(); } void Jit::DumpInfo(std::ostream& os) { @@ -132,9 +144,11 @@ Jit::Jit() : dump_info_on_shutdown_(false), cumulative_timings_("JIT timings"), memory_use_("Memory used for compilation", 16), lock_("JIT memory use lock"), + use_jit_compilation_(true), save_profiling_info_(false) {} Jit* Jit::Create(JitOptions* options, std::string* error_msg) { + DCHECK(options->UseJitCompilation() || options->GetSaveProfilingInfo()); std::unique_ptr jit(new Jit); jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown(); if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) { @@ -148,6 +162,7 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) { if (jit->GetCodeCache() == nullptr) { return nullptr; } + jit->use_jit_compilation_ = options->UseJitCompilation(); jit->save_profiling_info_ = options->GetSaveProfilingInfo(); VLOG(jit) << "JIT created with initial_capacity=" << PrettySize(options->GetCodeCacheInitialCapacity()) @@ -160,8 +175,7 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) { 
jit->warm_method_threshold_ = options->GetWarmupThreshold(); jit->osr_method_threshold_ = options->GetOsrThreshold(); jit->priority_thread_weight_ = options->GetPriorityThreadWeight(); - jit->transition_weight_ = std::max( - jit->warm_method_threshold_ / kDefaultTransitionRatio, static_cast(1)); + jit->invoke_transition_weight_ = options->GetInvokeTransitionWeight(); jit->CreateThreadPool(); @@ -227,6 +241,7 @@ bool Jit::LoadCompiler(std::string* error_msg) { } bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) { + DCHECK(Runtime::Current()->UseJitCompilation()); DCHECK(!method->IsRuntimeMethod()); // Don't compile the method if it has breakpoints. @@ -331,8 +346,12 @@ Jit::~Jit() { } void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) { + if (!Runtime::Current()->UseJitCompilation()) { + // No need to notify if we only use the JIT to save profiles. + return; + } jit::Jit* jit = Runtime::Current()->GetJit(); - if (jit != nullptr && jit->generate_debug_info_) { + if (jit->generate_debug_info_) { DCHECK(jit->jit_types_loaded_ != nullptr); jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1); } @@ -546,6 +565,7 @@ class JitCompileTask FINAL : public Task { VLOG(jit) << "Start profiling " << PrettyMethod(method_); } } + ProfileSaver::NotifyJitActivity(); } void Finalize() OVERRIDE { @@ -606,22 +626,24 @@ void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_ } // Avoid jumping more than one state at a time. new_count = std::min(new_count, hot_method_threshold_ - 1); - } else if (starting_count < hot_method_threshold_) { - if ((new_count >= hot_method_threshold_) && - !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) { - DCHECK(thread_pool_ != nullptr); - thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile)); - } - // Avoid jumping more than one state at a time. 
- new_count = std::min(new_count, osr_method_threshold_ - 1); - } else if (starting_count < osr_method_threshold_) { - if (!with_backedges) { - // If the samples don't contain any back edge, we don't increment the hotness. - return; - } - if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) { - DCHECK(thread_pool_ != nullptr); - thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr)); + } else if (use_jit_compilation_) { + if (starting_count < hot_method_threshold_) { + if ((new_count >= hot_method_threshold_) && + !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) { + DCHECK(thread_pool_ != nullptr); + thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile)); + } + // Avoid jumping more than one state at a time. + new_count = std::min(new_count, osr_method_threshold_ - 1); + } else if (starting_count < osr_method_threshold_) { + if (!with_backedges) { + // If the samples don't contain any back edge, we don't increment the hotness. + return; + } + if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) { + DCHECK(thread_pool_ != nullptr); + thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr)); + } } } // Update hotness counter @@ -629,7 +651,8 @@ void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_ } void Jit::MethodEntered(Thread* thread, ArtMethod* method) { - if (UNLIKELY(Runtime::Current()->GetJit()->JitAtFirstUse())) { + Runtime* runtime = Runtime::Current(); + if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) { // The compiler requires a ProfilingInfo object. 
ProfilingInfo::Create(thread, method, /* retry_allocation */ true); JitCompileTask compile_task(method, JitCompileTask::kCompile); diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index 8198c18eb..f3a6240e8 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -44,7 +44,7 @@ class Jit { static constexpr bool kStressMode = kIsDebugBuild; static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 10000; static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000; - static constexpr size_t kDefaultTransitionRatio = 100; + static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500; virtual ~Jit(); static Jit* Create(JitOptions* options, std::string* error_msg); @@ -87,6 +87,15 @@ class Jit { return priority_thread_weight_; } + // Returns false if we only need to save profile information and not compile methods. + bool UseJitCompilation() const { + return use_jit_compilation_; + } + + bool SaveProfilingInfo() const { + return save_profiling_info_; + } + // Wait until there is no more pending compilation tasks. void WaitForCompilationToFinish(Thread* self); @@ -106,12 +115,12 @@ class Jit { void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller) SHARED_REQUIRES(Locks::mutator_lock_) { - AddSamples(self, caller, transition_weight_, false); + AddSamples(self, caller, invoke_transition_weight_, false); } void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee) SHARED_REQUIRES(Locks::mutator_lock_) { - AddSamples(self, callee, transition_weight_, false); + AddSamples(self, callee, invoke_transition_weight_, false); } // Starts the profile saver if the config options allow profile recording. 
@@ -179,13 +188,14 @@ class Jit { std::unique_ptr code_cache_; + bool use_jit_compilation_; bool save_profiling_info_; static bool generate_debug_info_; uint16_t hot_method_threshold_; uint16_t warm_method_threshold_; uint16_t osr_method_threshold_; uint16_t priority_thread_weight_; - uint16_t transition_weight_; + uint16_t invoke_transition_weight_; std::unique_ptr thread_pool_; DISALLOW_COPY_AND_ASSIGN(Jit); @@ -206,6 +216,9 @@ class JitOptions { uint16_t GetPriorityThreadWeight() const { return priority_thread_weight_; } + size_t GetInvokeTransitionWeight() const { + return invoke_transition_weight_; + } size_t GetCodeCacheInitialCapacity() const { return code_cache_initial_capacity_; } @@ -218,33 +231,34 @@ class JitOptions { bool GetSaveProfilingInfo() const { return save_profiling_info_; } - bool UseJIT() const { - return use_jit_; + bool UseJitCompilation() const { + return use_jit_compilation_; } - void SetUseJIT(bool b) { - use_jit_ = b; + void SetUseJitCompilation(bool b) { + use_jit_compilation_ = b; } void SetSaveProfilingInfo(bool b) { save_profiling_info_ = b; } void SetJitAtFirstUse() { - use_jit_ = true; + use_jit_compilation_ = true; compile_threshold_ = 0; } private: - bool use_jit_; + bool use_jit_compilation_; size_t code_cache_initial_capacity_; size_t code_cache_max_capacity_; size_t compile_threshold_; size_t warmup_threshold_; size_t osr_threshold_; uint16_t priority_thread_weight_; + size_t invoke_transition_weight_; bool dump_info_on_shutdown_; bool save_profiling_info_; JitOptions() - : use_jit_(false), + : use_jit_compilation_(false), code_cache_initial_capacity_(0), code_cache_max_capacity_(0), compile_threshold_(0), diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc index a3078bc1b..c99d3636a 100644 --- a/runtime/jit/offline_profiling_info.cc +++ b/runtime/jit/offline_profiling_info.cc @@ -645,7 +645,8 @@ std::set ProfileCompilationInfo::GetResolvedClasses() c for (auto&& pair : info_) { const 
std::string& profile_key = pair.first; const DexFileData& data = pair.second; - DexCacheResolvedClasses classes(profile_key, data.checksum); + // TODO: Is it OK to use the same location for both base and dex location here? + DexCacheResolvedClasses classes(profile_key, profile_key, data.checksum); classes.AddClasses(data.class_set.begin(), data.class_set.end()); ret.insert(classes); } diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc index 7a9d2506d..8358ce360 100644 --- a/runtime/jit/profile_saver.cc +++ b/runtime/jit/profile_saver.cc @@ -33,14 +33,15 @@ namespace art { // TODO: read the constants from ProfileOptions, // Add a random delay each time we go to sleep so that we don't hammer the CPU // with all profile savers running at the same time. -static constexpr const uint64_t kRandomDelayMaxMs = 30 * 1000; // 30 seconds -static constexpr const uint64_t kMaxBackoffMs = 10 * 60 * 1000; // 10 minutes -static constexpr const uint64_t kSavePeriodMs = 20 * 1000; // 20 seconds +static constexpr const uint64_t kMinSavePeriodNs = MsToNs(20 * 1000); // 20 seconds static constexpr const uint64_t kSaveResolvedClassesDelayMs = 2 * 1000; // 2 seconds -static constexpr const double kBackoffCoef = 2.0; static constexpr const uint32_t kMinimumNumberOfMethodsToSave = 10; static constexpr const uint32_t kMinimumNumberOfClassesToSave = 10; +static constexpr const uint32_t kMinimumNumberOfNotificationBeforeWake = + kMinimumNumberOfMethodsToSave; +static constexpr const uint32_t kMaximumNumberOfNotificationBeforeWake = 50; + ProfileSaver* ProfileSaver::instance_ = nullptr; pthread_t ProfileSaver::profiler_pthread_ = 0U; @@ -55,6 +56,8 @@ ProfileSaver::ProfileSaver(const std::string& output_filename, shutting_down_(false), last_save_number_of_methods_(0), last_save_number_of_classes_(0), + last_time_ns_saver_woke_up_(0), + jit_activity_notifications_(0), wait_lock_("ProfileSaver wait lock"), period_condition_("ProfileSaver period condition", wait_lock_), 
total_bytes_written_(0), @@ -65,7 +68,9 @@ ProfileSaver::ProfileSaver(const std::string& output_filename, total_ms_of_sleep_(0), total_ns_of_work_(0), total_number_of_foreign_dex_marks_(0), - max_number_of_profile_entries_cached_(0) { + max_number_of_profile_entries_cached_(0), + total_number_of_hot_spikes_(0), + total_number_of_wake_ups_(0) { AddTrackedLocations(output_filename, app_data_dir, code_paths); if (!app_data_dir.empty()) { // The application directory is used to determine which dex files are owned by app. @@ -83,55 +88,89 @@ ProfileSaver::ProfileSaver(const std::string& output_filename, } void ProfileSaver::Run() { - srand(MicroTime() * getpid()); Thread* self = Thread::Current(); - uint64_t save_period_ms = kSavePeriodMs; - VLOG(profiler) << "Save profiling information every " << save_period_ms << " ms"; - bool cache_resolved_classes = true; + // Fetch the resolved classes for the app images after sleeping for + // kSaveResolvedClassesDelayMs. + // TODO(calin) This only considers the case of the primary profile file. + // Anything that gets loaded in the same VM will not have their resolved + // classes save (unless they started before the initial saving was done). + { + MutexLock mu(self, wait_lock_); + period_condition_.TimedWait(self, kSaveResolvedClassesDelayMs, 0); + total_ms_of_sleep_ += kSaveResolvedClassesDelayMs; + } + FetchAndCacheResolvedClasses(); + + // Loop for the profiled methods. while (!ShuttingDown(self)) { - uint64_t sleep_time_ms; - if (cache_resolved_classes) { - // Sleep less long for the first iteration since we want to record loaded classes shortly - // after app launch. 
- sleep_time_ms = kSaveResolvedClassesDelayMs; - } else { - const uint64_t random_sleep_delay_ms = rand() % kRandomDelayMaxMs; - sleep_time_ms = save_period_ms + random_sleep_delay_ms; - } + uint64_t sleep_start = NanoTime(); { MutexLock mu(self, wait_lock_); - period_condition_.TimedWait(self, sleep_time_ms, 0); + period_condition_.Wait(self); + total_number_of_wake_ups_++; + // We might have been woken up by a huge number of notifications to guarantee saving. + // If we didn't meet the minimum saving period go back to sleep (only if missed by + // a reasonable margin). + uint64_t sleep_time = NanoTime() - last_time_ns_saver_woke_up_; + while (kMinSavePeriodNs - sleep_time > (kMinSavePeriodNs / 10)) { + period_condition_.TimedWait(self, NsToMs(kMinSavePeriodNs - sleep_time), 0); + total_number_of_wake_ups_++; + sleep_time = NanoTime() - last_time_ns_saver_woke_up_; + } } - total_ms_of_sleep_ += sleep_time_ms; + total_ms_of_sleep_ += NsToMs(NanoTime() - sleep_start); + if (ShuttingDown(self)) { break; } - uint64_t start = NanoTime(); - if (cache_resolved_classes) { - // TODO(calin) This only considers the case of the primary profile file. - // Anything that gets loaded in the same VM will not have their resolved - // classes save (unless they started before the initial saving was done). - FetchAndCacheResolvedClasses(); - } else { - bool profile_saved_to_disk = ProcessProfilingInfo(); - if (profile_saved_to_disk) { - // Reset the period to the initial value as it's highly likely to JIT again. - save_period_ms = kSavePeriodMs; - VLOG(profiler) << "Profile saver: saved something, period reset to: " << save_period_ms; - } else { - // If we don't need to save now it is less likely that we will need to do - // so in the future. Increase the time between saves according to the - // kBackoffCoef, but make it no larger than kMaxBackoffMs. 
- save_period_ms = std::min(kMaxBackoffMs, - static_cast(kBackoffCoef * save_period_ms)); - VLOG(profiler) << "Profile saver: nothing to save, delaying period to: " << save_period_ms; - } + uint16_t new_methods = 0; + uint64_t start_work = NanoTime(); + bool profile_saved_to_disk = ProcessProfilingInfo(&new_methods); + // Update the notification counter based on result. Note that there might be contention on this + // but we don't care about to be 100% precise. + if (!profile_saved_to_disk) { + // If we didn't save to disk it may be because we didn't have enough new methods. + // Set the jit activity notifications to new_methods so we can wake up earlier if needed. + jit_activity_notifications_ = new_methods; } - cache_resolved_classes = false; + total_ns_of_work_ += NanoTime() - start_work; + } +} + +void ProfileSaver::NotifyJitActivity() { + MutexLock mu(Thread::Current(), *Locks::profiler_lock_); + if (instance_ == nullptr || instance_->shutting_down_) { + return; + } + instance_->NotifyJitActivityInternal(); +} - total_ns_of_work_ += (NanoTime() - start); +void ProfileSaver::WakeUpSaver() { + jit_activity_notifications_ = 0; + last_time_ns_saver_woke_up_ = NanoTime(); + period_condition_.Signal(Thread::Current()); +} + +void ProfileSaver::NotifyJitActivityInternal() { + // Unlikely to overflow but if it happens, + // we would have waken up the saver long before that. + jit_activity_notifications_++; + // Note that we are not as precise as we could be here but we don't want to wake the saver + // every time we see a hot method. + if (jit_activity_notifications_ > kMinimumNumberOfNotificationBeforeWake) { + MutexLock wait_mutex(Thread::Current(), wait_lock_); + if ((NanoTime() - last_time_ns_saver_woke_up_) > kMinSavePeriodNs) { + WakeUpSaver(); + } + } else if (jit_activity_notifications_ > kMaximumNumberOfNotificationBeforeWake) { + // Make sure to wake up the saver if we see a spike in the number of notifications. 
+ // This is a precaution to avoid "loosing" a big number of methods in case + // this is a spike with no jit after. + total_number_of_hot_spikes_++; + MutexLock wait_mutex(Thread::Current(), wait_lock_); + WakeUpSaver(); } } @@ -145,20 +184,25 @@ ProfileCompilationInfo* ProfileSaver::GetCachedProfiledInfo(const std::string& f void ProfileSaver::FetchAndCacheResolvedClasses() { ScopedTrace trace(__PRETTY_FUNCTION__); - ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); std::set resolved_classes = class_linker->GetResolvedClasses(/*ignore boot classes*/ true); MutexLock mu(Thread::Current(), *Locks::profiler_lock_); uint64_t total_number_of_profile_entries_cached = 0; + for (const auto& it : tracked_dex_base_locations_) { - std::set resolved_classes_for_location; + std::set resolved_classes_for_location; const std::string& filename = it.first; const std::set& locations = it.second; for (const DexCacheResolvedClasses& classes : resolved_classes) { - if (locations.find(classes.GetDexLocation()) != locations.end()) { + if (locations.find(classes.GetBaseLocation()) != locations.end()) { + VLOG(profiler) << "Added classes for location " << classes.GetBaseLocation() + << " (" << classes.GetDexLocation() << ")"; resolved_classes_for_location.insert(classes); + } else { + VLOG(profiler) << "Location not found " << classes.GetBaseLocation() + << " (" << classes.GetDexLocation() << ")"; } } ProfileCompilationInfo* info = GetCachedProfiledInfo(filename); @@ -170,7 +214,7 @@ void ProfileSaver::FetchAndCacheResolvedClasses() { total_number_of_profile_entries_cached); } -bool ProfileSaver::ProcessProfilingInfo() { +bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) { ScopedTrace trace(__PRETTY_FUNCTION__); SafeMap> tracked_locations; { @@ -181,6 +225,8 @@ bool ProfileSaver::ProcessProfilingInfo() { bool profile_file_saved = false; uint64_t total_number_of_profile_entries_cached = 0; + *new_methods = 0; + for (const auto& it : tracked_locations) { 
if (ShuttingDown(Thread::Current())) { return true; @@ -211,6 +257,7 @@ bool ProfileSaver::ProcessProfilingInfo() { total_number_of_skipped_writes_++; continue; } + *new_methods = std::max(static_cast(delta_number_of_methods), *new_methods); uint64_t bytes_written; // Force the save. In case the profile data is corrupted or the profile // has the wrong version this will "fix" the file to the correct format. @@ -247,12 +294,17 @@ bool ProfileSaver::ProcessProfilingInfo() { void* ProfileSaver::RunProfileSaverThread(void* arg) { Runtime* runtime = Runtime::Current(); - ProfileSaver* profile_saver = reinterpret_cast(arg); - CHECK(runtime->AttachCurrentThread("Profile Saver", - /*as_daemon*/true, - runtime->GetSystemThreadGroup(), - /*create_peer*/true)); + bool attached = runtime->AttachCurrentThread("Profile Saver", + /*as_daemon*/true, + runtime->GetSystemThreadGroup(), + /*create_peer*/true); + if (!attached) { + CHECK(runtime->IsShuttingDown(Thread::Current())); + return nullptr; + } + + ProfileSaver* profile_saver = reinterpret_cast(arg); profile_saver->Run(); runtime->DetachCurrentThread(); @@ -285,7 +337,7 @@ void ProfileSaver::Start(const std::string& output_filename, const std::vector& code_paths, const std::string& foreign_dex_profile_path, const std::string& app_data_dir) { - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->SaveProfileInfo()); DCHECK(!output_filename.empty()); DCHECK(jit_code_cache != nullptr); @@ -480,17 +532,20 @@ bool ProfileSaver::MaybeRecordDexUseInternal( // frameworks/base/services/core/java/com/android/server/pm/PackageDexOptimizer.java) std::replace(dex_location_real_path_str.begin(), dex_location_real_path_str.end(), '/', '@'); std::string flag_path = foreign_dex_profile_path + "/" + dex_location_real_path_str; - // No need to give any sort of access to flag_path. The system has enough permissions - // to test for its existence. 
- int fd = TEMP_FAILURE_RETRY(open(flag_path.c_str(), O_CREAT | O_EXCL, 0)); + // We use O_RDONLY as the access mode because we must supply some access + // mode, and there is no access mode that means 'create but do not read' the + // file. We will not actually read from the file. + int fd = TEMP_FAILURE_RETRY(open(flag_path.c_str(), + O_CREAT | O_RDONLY | O_EXCL | O_CLOEXEC | O_NOFOLLOW, 0)); if (fd != -1) { if (close(fd) != 0) { PLOG(WARNING) << "Could not close file after flagging foreign dex use " << flag_path; } return true; } else { - if (errno != EEXIST) { - // Another app could have already created the file. + if (errno != EEXIST && errno != EACCES) { + // Another app could have already created the file, and selinux may not + // allow the read access to the file implied by the call to open. PLOG(WARNING) << "Could not create foreign dex use mark " << flag_path; return false; } @@ -517,7 +572,38 @@ void ProfileSaver::DumpInfo(std::ostream& os) { << "ProfileSaver total_number_of_foreign_dex_marks=" << total_number_of_foreign_dex_marks_ << '\n' << "ProfileSaver max_number_profile_entries_cached=" - << max_number_of_profile_entries_cached_ << '\n'; + << max_number_of_profile_entries_cached_ << '\n' + << "ProfileSaver total_number_of_hot_spikes=" << total_number_of_hot_spikes_ << '\n' + << "ProfileSaver total_number_of_wake_ups=" << total_number_of_wake_ups_ << '\n'; +} + + +void ProfileSaver::ForceProcessProfiles() { + ProfileSaver* saver = nullptr; + { + MutexLock mu(Thread::Current(), *Locks::profiler_lock_); + saver = instance_; + } + // TODO(calin): this is not actually thread safe as the instance_ may have been deleted, + // but we only use this in testing when we know this won't happen. + // Refactor the way we handle the instance so that we don't end up in this situation. 
+ if (saver != nullptr) { + uint16_t new_methods; + saver->ProcessProfilingInfo(&new_methods); + } +} + +bool ProfileSaver::HasSeenMethod(const std::string& profile, + const DexFile* dex_file, + uint16_t method_idx) { + MutexLock mu(Thread::Current(), *Locks::profiler_lock_); + if (instance_ != nullptr) { + ProfileCompilationInfo* info = instance_->GetCachedProfiledInfo(profile); + if (info != nullptr) { + return info->ContainsMethod(MethodReference(dex_file, method_idx)); + } + } + return false; } } // namespace art diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h index 0a222bfdc..c6da95931 100644 --- a/runtime/jit/profile_saver.h +++ b/runtime/jit/profile_saver.h @@ -49,6 +49,17 @@ class ProfileSaver { // If the profile saver is running, dumps statistics to the `os`. Otherwise it does nothing. static void DumpInstanceInfo(std::ostream& os); + // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock. + static void NotifyJitActivity() + REQUIRES(!Locks::profiler_lock_, !wait_lock_) + NO_THREAD_SAFETY_ANALYSIS; + + // Just for testing purpose. + static void ForceProcessProfiles(); + static bool HasSeenMethod(const std::string& profile, + const DexFile* dex_file, + uint16_t method_idx); + private: ProfileSaver(const std::string& output_filename, jit::JitCodeCache* jit_code_cache, @@ -65,7 +76,13 @@ class ProfileSaver { void Run() REQUIRES(!Locks::profiler_lock_, !wait_lock_); // Processes the existing profiling info from the jit code cache and returns // true if it needed to be saved to disk. - bool ProcessProfilingInfo(); + bool ProcessProfilingInfo(uint16_t* new_methods) + REQUIRES(!Locks::profiler_lock_) + REQUIRES(!Locks::mutator_lock_); + + void NotifyJitActivityInternal() REQUIRES(!wait_lock_); + void WakeUpSaver() REQUIRES(wait_lock_); + // Returns true if the saver is shutting down (ProfileSaver::Stop() has been called). 
bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_); @@ -112,6 +129,8 @@ class ProfileSaver { bool shutting_down_ GUARDED_BY(Locks::profiler_lock_); uint32_t last_save_number_of_methods_; uint32_t last_save_number_of_classes_; + uint64_t last_time_ns_saver_woke_up_ GUARDED_BY(wait_lock_); + uint32_t jit_activity_notifications_; // A local cache for the profile information. Maps each tracked file to its // profile information. The size of this cache is usually very small and tops @@ -133,6 +152,8 @@ class ProfileSaver { uint64_t total_number_of_foreign_dex_marks_; // TODO(calin): replace with an actual size. uint64_t max_number_of_profile_entries_cached_; + uint64_t total_number_of_hot_spikes_; + uint64_t total_number_of_wake_ups_; DISALLOW_COPY_AND_ASSIGN(ProfileSaver); }; diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index 5d89c2180..771f8ed29 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -302,8 +302,9 @@ MemMap* MemMap::MapAnonymous(const char* name, if (use_ashmem) { if (!kIsTargetBuild) { - // When not on Android ashmem is faked using files in /tmp. Ensure that such files won't - // fail due to ulimit restrictions. If they will then use a regular mmap. + // When not on Android (either host or assuming a linux target) ashmem is faked using + // files in /tmp. Ensure that such files won't fail due to ulimit restrictions. If they + // will then use a regular mmap. struct rlimit rlimit_fsize; CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0); use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) || diff --git a/runtime/mem_map.h b/runtime/mem_map.h index 3eaf57684..597f0d46e 100644 --- a/runtime/mem_map.h +++ b/runtime/mem_map.h @@ -68,7 +68,7 @@ class MemMap { bool low_4gb, bool reuse, std::string* error_msg, - bool use_ashmem = true); + bool use_ashmem = !kIsTargetLinux); // Create placeholder for a region allocated by direct call to mmap. 
// This is useful when we do not have control over the code calling mmap, @@ -172,7 +172,7 @@ class MemMap { const char* tail_name, int tail_prot, std::string* error_msg, - bool use_ashmem = true); + bool use_ashmem = !kIsTargetLinux); static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map) REQUIRES(!Locks::mem_maps_lock_); diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index 5b6ded162..8c20fa680 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -673,7 +673,7 @@ class MANAGED Class FINAL : public Object { // `This` and `klass` must be classes. Class* GetCommonSuperClass(Handle klass) SHARED_REQUIRES(Locks::mutator_lock_); - void SetSuperClass(Class *new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) { + void SetSuperClass(Class* new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) { // Super class is assigned once, except during class linker initialization. Class* old_super_class = GetFieldObject(OFFSET_OF_OBJECT_MEMBER(Class, super_class_)); DCHECK(old_super_class == nullptr || old_super_class == new_super_class); diff --git a/runtime/modifiers.h b/runtime/modifiers.h index 6dd182a11..fd7a125bc 100644 --- a/runtime/modifiers.h +++ b/runtime/modifiers.h @@ -60,10 +60,13 @@ static constexpr uint32_t kAccDefault = 0x00400000; // method (ru // This is set by the class linker during LinkInterfaceMethods. Prior to that point we do not know // if any particular method needs to be a default conflict. Used to figure out at runtime if // invoking this method will throw an exception. -static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime) +static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime) // Set by the verifier for a method we do not want the compiler to compile. 
-static constexpr uint32_t kAccCompileDontBother = 0x01000000; // method (runtime) +static constexpr uint32_t kAccCompileDontBother = 0x01000000; // method (runtime) + +// Set by the verifier for a method that could not be verified to follow structured locking. +static constexpr uint32_t kAccMustCountLocks = 0x02000000; // method (runtime) // Special runtime-only flags. // Interface and all its super-interfaces with default methods have been recursively initialized. diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 3680c7831..71c866f3d 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -215,6 +215,85 @@ void Monitor::SetObject(mirror::Object* object) { obj_ = GcRoot(object); } +// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here. + +struct NthCallerWithDexPcVisitor FINAL : public StackVisitor { + explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame) + SHARED_REQUIRES(Locks::mutator_lock_) + : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFramesNoResolve), + method_(nullptr), + dex_pc_(0), + current_frame_number_(0), + wanted_frame_number_(frame) {} + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + ArtMethod* m = GetMethod(); + if (m == nullptr || m->IsRuntimeMethod()) { + // Runtime method, upcall, or resolution issue. Skip. + return true; + } + + // Is this the requested frame? + if (current_frame_number_ == wanted_frame_number_) { + method_ = m; + dex_pc_ = GetDexPc(false /* abort_on_error*/); + return false; + } + + // Look for more. + current_frame_number_++; + return true; + } + + ArtMethod* method_; + uint32_t dex_pc_; + + private: + size_t current_frame_number_; + const size_t wanted_frame_number_; +}; + +// This function is inlined and just helps to not have the VLOG and ATRACE check at all the +// potential tracing points. 
+void Monitor::AtraceMonitorLock(Thread* self, mirror::Object* obj, bool is_wait) { + if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging) && ATRACE_ENABLED())) { + AtraceMonitorLockImpl(self, obj, is_wait); + } +} + +void Monitor::AtraceMonitorLockImpl(Thread* self, mirror::Object* obj, bool is_wait) { + // Wait() requires a deeper call stack to be useful. Otherwise you'll see "Waiting at + // Object.java". Assume that we'll wait a nontrivial amount, so it's OK to do a longer + // stack walk than if !is_wait. + NthCallerWithDexPcVisitor visitor(self, is_wait ? 1U : 0U); + visitor.WalkStack(false); + const char* prefix = is_wait ? "Waiting on " : "Locking "; + + const char* filename; + int32_t line_number; + TranslateLocation(visitor.method_, visitor.dex_pc_, &filename, &line_number); + + // It would be nice to have a stable "ID" for the object here. However, the only stable thing + // would be the identity hashcode. But we cannot use IdentityHashcode here: For one, there are + // times when it is unsafe to make that call (see stack dumping for an explanation). More + // importantly, we would have to give up on thin-locking when adding systrace locks, as the + // identity hashcode is stored in the lockword normally (so can't be used with thin-locks). + // + // Because of thin-locks we also cannot use the monitor id (as there is no monitor). Monitor ids + // also do not have to be stable, as the monitor may be deflated. + std::string tmp = StringPrintf("%s %d at %s:%d", + prefix, + (obj == nullptr ? -1 : static_cast(reinterpret_cast(obj))), + (filename != nullptr ? 
filename : "null"), + line_number); + ATRACE_BEGIN(tmp.c_str()); +} + +void Monitor::AtraceMonitorUnlock() { + if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) { + ATRACE_END(); + } +} + std::string Monitor::PrettyContentionInfo(const std::string& owner_name, pid_t owner_tid, ArtMethod* owners_method, @@ -228,8 +307,8 @@ std::string Monitor::PrettyContentionInfo(const std::string& owner_name, std::ostringstream oss; oss << "monitor contention with owner " << owner_name << " (" << owner_tid << ")"; if (owners_method != nullptr) { - oss << " owner method=" << PrettyMethod(owners_method); - oss << " from " << owners_filename << ":" << owners_line_number; + oss << " at " << PrettyMethod(owners_method); + oss << "(" << owners_filename << ":" << owners_line_number << ")"; } oss << " waiters=" << num_waiters; return oss.str(); @@ -246,10 +325,10 @@ void Monitor::Lock(Thread* self) { if (lock_profiling_threshold_ != 0) { locking_method_ = self->GetCurrentMethod(&locking_dex_pc_); } - return; + break; } else if (owner_ == self) { // Recursive. lock_count_++; - return; + break; } // Contended. const bool log_contention = (lock_profiling_threshold_ != 0); @@ -284,8 +363,9 @@ void Monitor::Lock(Thread* self) { const char* filename; int32_t line_number; TranslateLocation(m, pc, &filename, &line_number); - oss << " blocking from " << (filename != nullptr ? filename : "null") - << ":" << line_number; + oss << " blocking from " + << PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null") << ":" + << line_number << ")"; ATRACE_BEGIN(oss.str().c_str()); } monitor_contenders_.Wait(self); // Still contended so wait. @@ -318,6 +398,8 @@ void Monitor::Lock(Thread* self) { } if (sample_percent != 0 && (static_cast(rand() % 100) < sample_percent)) { if (wait_ms > kLongWaitMs && owners_method != nullptr) { + uint32_t pc; + ArtMethod* m = self->GetCurrentMethod(&pc); // TODO: We should maybe check that original_owner is still a live thread. 
LOG(WARNING) << "Long " << PrettyContentionInfo(original_owner_name, @@ -325,7 +407,7 @@ void Monitor::Lock(Thread* self) { owners_method, owners_dex_pc, num_waiters) - << " for " << PrettyDuration(MsToNs(wait_ms)); + << " in " << PrettyMethod(m) << " for " << PrettyDuration(MsToNs(wait_ms)); } const char* owners_filename; int32_t owners_line_number; @@ -348,6 +430,8 @@ void Monitor::Lock(Thread* self) { monitor_lock_.Lock(self); // Reacquire locks in order. --num_waiters_; } + + AtraceMonitorLock(self, GetObject(), false /* is_wait */); } static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) @@ -457,6 +541,7 @@ bool Monitor::Unlock(Thread* self) { } if (owner == self) { // We own the monitor, so nobody else can be in here. + AtraceMonitorUnlock(); if (lock_count_ == 0) { owner_ = nullptr; locking_method_ = nullptr; @@ -523,6 +608,11 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, uintptr_t saved_dex_pc = locking_dex_pc_; locking_dex_pc_ = 0; + AtraceMonitorUnlock(); // For the implicit Unlock() just above. This will only end the deepest + // nesting, but that is enough for the visualization, and corresponds to + // the single Lock() we do afterwards. + AtraceMonitorLock(self, GetObject(), true /* is_wait */); + bool was_interrupted = false; { // Update thread state. If the GC wakes up, it'll ignore us, knowing @@ -586,6 +676,8 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr); } + AtraceMonitorUnlock(); // End Wait(). + // Re-acquire the monitor and lock. 
Lock(self); monitor_lock_.Lock(self); @@ -775,6 +867,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { case LockWord::kUnlocked: { LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.ReadBarrierState())); if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) { + AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */); // CasLockWord enforces more than the acquire ordering we need here. return h_obj.Get(); // Success! } @@ -790,10 +883,12 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { lock_word.ReadBarrierState())); if (!kUseReadBarrier) { h_obj->SetLockWord(thin_locked, true); + AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */); return h_obj.Get(); // Success! } else { // Use CAS to preserve the read barrier state. if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) { + AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */); return h_obj.Get(); // Success! } } @@ -830,7 +925,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { continue; // Start from the beginning. default: { LOG(FATAL) << "Invalid monitor state " << lock_word.GetState(); - return h_obj.Get(); + UNREACHABLE(); } } } @@ -869,11 +964,13 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) { if (!kUseReadBarrier) { DCHECK_EQ(new_lw.ReadBarrierState(), 0U); h_obj->SetLockWord(new_lw, true); + AtraceMonitorUnlock(); // Success! return true; } else { // Use CAS to preserve the read barrier state. if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, new_lw)) { + AtraceMonitorUnlock(); // Success! return true; } diff --git a/runtime/monitor.h b/runtime/monitor.h index 8c7496b52..7b4b8f946 100644 --- a/runtime/monitor.h +++ b/runtime/monitor.h @@ -250,6 +250,17 @@ class Monitor { uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_); + // Support for systrace output of monitor operations. 
+ ALWAYS_INLINE static void AtraceMonitorLock(Thread* self, + mirror::Object* obj, + bool is_wait) + SHARED_REQUIRES(Locks::mutator_lock_); + static void AtraceMonitorLockImpl(Thread* self, + mirror::Object* obj, + bool is_wait) + SHARED_REQUIRES(Locks::mutator_lock_); + ALWAYS_INLINE static void AtraceMonitorUnlock(); + static uint32_t lock_profiling_threshold_; Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc index ce38e4f10..a47a4b2cf 100644 --- a/runtime/monitor_pool.cc +++ b/runtime/monitor_pool.cc @@ -28,7 +28,11 @@ namespace mirror { } // namespace mirror MonitorPool::MonitorPool() - : num_chunks_(0), capacity_(0), first_free_(nullptr) { + : current_chunk_list_index_(0), num_chunks_(0), current_chunk_list_capacity_(0), + first_free_(nullptr) { + for (size_t i = 0; i < kMaxChunkLists; ++i) { + monitor_chunks_[i] = nullptr; // Not absolutely required, but ... + } AllocateChunk(); // Get our first chunk. } @@ -37,24 +41,19 @@ MonitorPool::MonitorPool() void MonitorPool::AllocateChunk() { DCHECK(first_free_ == nullptr); - // Do we need to resize? - if (num_chunks_ == capacity_) { - if (capacity_ == 0U) { - // Initialization. - capacity_ = kInitialChunkStorage; - uintptr_t* new_backing = new uintptr_t[capacity_](); - DCHECK(monitor_chunks_.LoadRelaxed() == nullptr); - monitor_chunks_.StoreRelaxed(new_backing); - } else { - size_t new_capacity = 2 * capacity_; - uintptr_t* new_backing = new uintptr_t[new_capacity](); - uintptr_t* old_backing = monitor_chunks_.LoadRelaxed(); - memcpy(new_backing, old_backing, sizeof(uintptr_t) * capacity_); - monitor_chunks_.StoreRelaxed(new_backing); - capacity_ = new_capacity; - old_chunk_arrays_.push_back(std::unique_ptr(old_backing)); - VLOG(monitor) << "Resizing to capacity " << capacity_; - } + // Do we need to allocate another chunk list? 
+ if (num_chunks_ == current_chunk_list_capacity_) { + if (current_chunk_list_capacity_ != 0U) { + ++current_chunk_list_index_; + CHECK_LT(current_chunk_list_index_, kMaxChunkLists) << "Out of space for inflated monitors"; + VLOG(monitor) << "Expanding to capacity " + << 2 * ChunkListCapacity(current_chunk_list_index_) - kInitialChunkStorage; + } // else we're initializing + current_chunk_list_capacity_ = ChunkListCapacity(current_chunk_list_index_); + uintptr_t* new_list = new uintptr_t[current_chunk_list_capacity_](); + DCHECK(monitor_chunks_[current_chunk_list_index_] == nullptr); + monitor_chunks_[current_chunk_list_index_] = new_list; + num_chunks_ = 0; } // Allocate the chunk. @@ -65,7 +64,7 @@ void MonitorPool::AllocateChunk() { CHECK_EQ(0U, reinterpret_cast(chunk) % kMonitorAlignment); // Add the chunk. - *(monitor_chunks_.LoadRelaxed() + num_chunks_) = reinterpret_cast(chunk); + monitor_chunks_[current_chunk_list_index_][num_chunks_] = reinterpret_cast(chunk); num_chunks_++; // Set up the free list @@ -73,8 +72,8 @@ void MonitorPool::AllocateChunk() { (kChunkCapacity - 1) * kAlignedMonitorSize); last->next_free_ = nullptr; // Eagerly compute id. - last->monitor_id_ = OffsetToMonitorId((num_chunks_ - 1) * kChunkSize + - (kChunkCapacity - 1) * kAlignedMonitorSize); + last->monitor_id_ = OffsetToMonitorId(current_chunk_list_index_* (kMaxListSize * kChunkSize) + + (num_chunks_ - 1) * kChunkSize + (kChunkCapacity - 1) * kAlignedMonitorSize); for (size_t i = 0; i < kChunkCapacity - 1; ++i) { Monitor* before = reinterpret_cast(reinterpret_cast(last) - kAlignedMonitorSize); @@ -91,21 +90,19 @@ void MonitorPool::AllocateChunk() { void MonitorPool::FreeInternal() { // This is on shutdown with NO_THREAD_SAFETY_ANALYSIS, can't/don't need to lock. 
- uintptr_t* backing = monitor_chunks_.LoadRelaxed(); - DCHECK(backing != nullptr); - DCHECK_GT(capacity_, 0U); - DCHECK_GT(num_chunks_, 0U); - - for (size_t i = 0; i < capacity_; ++i) { - if (i < num_chunks_) { - DCHECK_NE(backing[i], 0U); - allocator_.deallocate(reinterpret_cast(backing[i]), kChunkSize); - } else { - DCHECK_EQ(backing[i], 0U); + DCHECK_NE(current_chunk_list_capacity_, 0UL); + for (size_t i = 0; i <= current_chunk_list_index_; ++i) { + DCHECK_NE(monitor_chunks_[i], static_cast(nullptr)); + for (size_t j = 0; j < ChunkListCapacity(i); ++j) { + if (i < current_chunk_list_index_ || j < num_chunks_) { + DCHECK_NE(monitor_chunks_[i][j], 0U); + allocator_.deallocate(reinterpret_cast(monitor_chunks_[i][j]), kChunkSize); + } else { + DCHECK_EQ(monitor_chunks_[i][j], 0U); + } } + delete[] monitor_chunks_[i]; } - - delete[] backing; } Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h index 875b3fe73..99810e0c8 100644 --- a/runtime/monitor_pool.h +++ b/runtime/monitor_pool.h @@ -128,12 +128,17 @@ class MonitorPool { void ReleaseMonitorToPool(Thread* self, Monitor* monitor); void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors); - // Note: This is safe as we do not ever move chunks. + // Note: This is safe as we do not ever move chunks. All needed entries in the monitor_chunks_ + // data structure are read-only once we get here. Updates happen-before this call because + // the lock word was stored with release semantics and we read it with acquire semantics to + // retrieve the id. 
Monitor* LookupMonitor(MonitorId mon_id) { size_t offset = MonitorIdToOffset(mon_id); size_t index = offset / kChunkSize; + size_t top_index = index / kMaxListSize; + size_t list_index = index % kMaxListSize; size_t offset_in_chunk = offset % kChunkSize; - uintptr_t base = *(monitor_chunks_.LoadRelaxed()+index); + uintptr_t base = monitor_chunks_[top_index][list_index]; return reinterpret_cast(base + offset_in_chunk); } @@ -142,28 +147,37 @@ class MonitorPool { return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize); } - // Note: This is safe as we do not ever move chunks. MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) { MutexLock mu(self, *Locks::allocated_monitor_ids_lock_); - for (size_t index = 0; index < num_chunks_; ++index) { - uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index); - if (IsInChunk(chunk_addr, mon)) { - return OffsetToMonitorId( - reinterpret_cast(mon) - chunk_addr + index * kChunkSize); + for (size_t i = 0; i <= current_chunk_list_index_; ++i) { + for (size_t j = 0; j < ChunkListCapacity(i); ++j) { + if (j >= num_chunks_ && i == current_chunk_list_index_) { + break; + } + uintptr_t chunk_addr = monitor_chunks_[i][j]; + if (IsInChunk(chunk_addr, mon)) { + return OffsetToMonitorId( + reinterpret_cast(mon) - chunk_addr + + i * (kMaxListSize * kChunkSize) + j * kChunkSize); + } } } LOG(FATAL) << "Did not find chunk that contains monitor."; return 0; } - static size_t MonitorIdToOffset(MonitorId id) { + static constexpr size_t MonitorIdToOffset(MonitorId id) { return id << 3; } - static MonitorId OffsetToMonitorId(size_t offset) { + static constexpr MonitorId OffsetToMonitorId(size_t offset) { return static_cast(offset >> 3); } + static constexpr size_t ChunkListCapacity(size_t index) { + return kInitialChunkStorage << index; + } + // TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3). 
static constexpr size_t kMonitorAlignment = 8; // Size of a monitor, rounded up to a multiple of alignment. @@ -174,20 +188,47 @@ class MonitorPool { // Chunk size that is referenced in the id. We can collapse this to the actually used storage // in a chunk, i.e., kChunkCapacity * kAlignedMonitorSize, but this will mean proper divisions. static constexpr size_t kChunkSize = kPageSize; - // The number of initial chunks storable in monitor_chunks_. The number is large enough to make - // resizing unlikely, but small enough to not waste too much memory. - static constexpr size_t kInitialChunkStorage = 8U; - - // List of memory chunks. Each chunk is kChunkSize. - Atomic monitor_chunks_; - // Number of chunks stored. + static_assert(IsPowerOfTwo(kChunkSize), "kChunkSize must be power of 2"); + // The number of chunks of storage that can be referenced by the initial chunk list. + // The total number of usable monitor chunks is typically 255 times this number, so it + // should be large enough that we don't run out. We run out of address bits if it's > 512. + // Currently we set it a bit smaller, to save half a page per process. We make it tiny in + // debug builds to catch growth errors. The only value we really expect to tune. + static constexpr size_t kInitialChunkStorage = kIsDebugBuild ? 1U : 256U; + static_assert(IsPowerOfTwo(kInitialChunkStorage), "kInitialChunkStorage must be power of 2"); + // The number of lists, each containing pointers to storage chunks. + static constexpr size_t kMaxChunkLists = 8; // Dictated by 3 bit index. Don't increase above 8. 
+ static_assert(IsPowerOfTwo(kMaxChunkLists), "kMaxChunkLists must be power of 2"); + static constexpr size_t kMaxListSize = kInitialChunkStorage << (kMaxChunkLists - 1); + // We lose 3 bits in monitor id due to 3 bit monitor_chunks_ index, and gain it back from + // the 3 bit alignment constraint on monitors: + static_assert(kMaxListSize * kChunkSize < (1 << LockWord::kMonitorIdSize), + "Monitor id bits don't fit"); + static_assert(IsPowerOfTwo(kMaxListSize), "kMaxListSize must be power of 2"); + + // Array of pointers to lists (again arrays) of pointers to chunks containing monitors. + // Zeroth entry points to a list (array) of kInitialChunkStorage pointers to chunks. + // Each subsequent list is twice as large as the preceding one. + // Monitor Ids are interpreted as follows: + // Top 3 bits (of 28): index into monitor_chunks_. + // Next 16 bits: index into the chunk list, i.e. monitor_chunks_[i]. + // Last 9 bits: offset within chunk, expressed as multiple of kMonitorAlignment. + // If we set kInitialChunkStorage to 512, this would allow us to use roughly 128K chunks of + // monitors, which is 0.5GB of monitors. With this maximum setting, the largest chunk list + // contains 64K entries, and we make full use of the available index space. With a + // kInitialChunkStorage value of 256, this is proportionately reduced to 0.25GB of monitors. + // Updates to monitor_chunks_ are guarded by allocated_monitor_ids_lock_ . + // No field in this entire data structure is ever updated once a monitor id whose lookup + // requires it has been made visible to another thread. Thus readers never race with + // updates, in spite of the fact that they acquire no locks. + uintptr_t* monitor_chunks_[kMaxChunkLists]; // uintptr_t is really a Monitor* . + // Highest currently used index in monitor_chunks_ . Used for newly allocated chunks. 
+ size_t current_chunk_list_index_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); + // Number of chunk pointers stored in monitor_chunks_[current_chunk_list_index_] so far. size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); - // Number of chunks storable. - size_t capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); - - // To avoid race issues when resizing, we keep all the previous arrays. - std::vector> old_chunk_arrays_ - GUARDED_BY(Locks::allocated_monitor_ids_lock_); + // After the initial allocation, this is always equal to + // ChunkListCapacity(current_chunk_list_index_). + size_t current_chunk_list_capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); typedef TrackingAllocator Allocator; Allocator allocator_; diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc index 0abe39d87..0126b4d0a 100644 --- a/runtime/native/dalvik_system_DexFile.cc +++ b/runtime/native/dalvik_system_DexFile.cc @@ -16,6 +16,8 @@ #include "dalvik_system_DexFile.h" +#include + #include "base/logging.h" #include "base/stl_util.h" #include "base/stringprintf.h" @@ -27,6 +29,7 @@ #include "mirror/class_loader.h" #include "mirror/object-inl.h" #include "mirror/string.h" +#include "oat_file.h" #include "oat_file_assistant.h" #include "oat_file_manager.h" #include "os.h" @@ -387,6 +390,61 @@ static jint GetDexOptNeeded(JNIEnv* env, return oat_file_assistant.GetDexOptNeeded(filter); } +static jstring DexFile_getDexFileStatus(JNIEnv* env, + jclass, + jstring javaFilename, + jstring javaInstructionSet) { + ScopedUtfChars filename(env, javaFilename); + if (env->ExceptionCheck()) { + return nullptr; + } + + ScopedUtfChars instruction_set(env, javaInstructionSet); + if (env->ExceptionCheck()) { + return nullptr; + } + + const InstructionSet target_instruction_set = GetInstructionSetFromString( + instruction_set.c_str()); + if (target_instruction_set == kNone) { + ScopedLocalRef iae(env, 
env->FindClass("java/lang/IllegalArgumentException")); + std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set.c_str())); + env->ThrowNew(iae.get(), message.c_str()); + return nullptr; + } + + OatFileAssistant oat_file_assistant(filename.c_str(), target_instruction_set, + false /* profile_changed */, + false /* load_executable */); + + std::ostringstream status; + bool oat_file_exists = false; + bool odex_file_exists = false; + if (oat_file_assistant.OatFileExists()) { + oat_file_exists = true; + status << *oat_file_assistant.OatFileName() << " [compilation_filter="; + status << CompilerFilter::NameOfFilter(oat_file_assistant.OatFileCompilerFilter()); + status << ", status=" << oat_file_assistant.OatFileStatus(); + } + + if (oat_file_assistant.OdexFileExists()) { + odex_file_exists = true; + if (oat_file_exists) { + status << "] "; + } + status << *oat_file_assistant.OdexFileName() << " [compilation_filter="; + status << CompilerFilter::NameOfFilter(oat_file_assistant.OdexFileCompilerFilter()); + status << ", status=" << oat_file_assistant.OdexFileStatus(); + } + + if (!oat_file_exists && !odex_file_exists) { + status << "invalid["; + } + + status << "]"; + return env->NewStringUTF(status.str().c_str()); +} + static jint DexFile_getDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename, @@ -481,6 +539,16 @@ static jstring DexFile_getNonProfileGuidedCompilerFilter(JNIEnv* env, return env->NewStringUTF(new_filter_str.c_str()); } +static jboolean DexFile_isBackedByOatFile(JNIEnv* env, jclass, jobject cookie) { + const OatFile* oat_file = nullptr; + std::vector dex_files; + if (!ConvertJavaArrayToDexFiles(env, cookie, /*out */ dex_files, /* out */ oat_file)) { + DCHECK(env->ExceptionCheck()); + return false; + } + return oat_file != nullptr; +} + static JNINativeMethod gMethods[] = { NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)Z"), NATIVE_METHOD(DexFile, @@ -506,6 +574,9 @@ static JNINativeMethod gMethods[] = { 
NATIVE_METHOD(DexFile, getNonProfileGuidedCompilerFilter, "(Ljava/lang/String;)Ljava/lang/String;"), + NATIVE_METHOD(DexFile, isBackedByOatFile, "(Ljava/lang/Object;)Z"), + NATIVE_METHOD(DexFile, getDexFileStatus, + "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;") }; void register_dalvik_system_DexFile(JNIEnv* env) { diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index f355c2a94..6c943dc17 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -145,6 +145,10 @@ static jboolean VMRuntime_isDebuggerActive(JNIEnv*, jobject) { return Dbg::IsDebuggerActive(); } +static jboolean VMRuntime_isNativeDebuggable(JNIEnv*, jobject) { + return Runtime::Current()->IsNativeDebuggable(); +} + static jobjectArray VMRuntime_properties(JNIEnv* env, jobject) { return toStringArray(env, Runtime::Current()->GetProperties()); } @@ -212,6 +216,10 @@ static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, static_cast(bytes)); } +static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) { + Runtime::Current()->RegisterSensitiveThread(); +} + static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) { if (UNLIKELY(bytes < 0)) { ScopedObjectAccess soa(env); @@ -637,12 +645,14 @@ static JNINativeMethod gMethods[] = { NATIVE_METHOD(VMRuntime, disableJitCompilation, "()V"), NATIVE_METHOD(VMRuntime, getTargetHeapUtilization, "()F"), NATIVE_METHOD(VMRuntime, isDebuggerActive, "!()Z"), + NATIVE_METHOD(VMRuntime, isNativeDebuggable, "!()Z"), NATIVE_METHOD(VMRuntime, nativeSetTargetHeapUtilization, "(F)V"), NATIVE_METHOD(VMRuntime, newNonMovableArray, "!(Ljava/lang/Class;I)Ljava/lang/Object;"), NATIVE_METHOD(VMRuntime, newUnpaddedArray, "!(Ljava/lang/Class;I)Ljava/lang/Object;"), NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"), NATIVE_METHOD(VMRuntime, 
setTargetSdkVersionNative, "(I)V"), NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"), + NATIVE_METHOD(VMRuntime, registerSensitiveThread, "()V"), NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"), NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"), NATIVE_METHOD(VMRuntime, requestHeapTrim, "()V"), diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index 6b7ca40be..0624da38c 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -238,12 +238,13 @@ static mirror::Field* GetPublicFieldRecursive( DCHECK(name != nullptr); DCHECK(self != nullptr); - StackHandleScope<1> hs(self); + StackHandleScope<2> hs(self); MutableHandle h_clazz(hs.NewHandle(clazz)); + Handle h_name(hs.NewHandle(name)); // We search the current class, its direct interfaces then its superclass. while (h_clazz.Get() != nullptr) { - mirror::Field* result = GetDeclaredField(self, h_clazz.Get(), name); + mirror::Field* result = GetDeclaredField(self, h_clazz.Get(), h_name.Get()); if ((result != nullptr) && (result->GetAccessFlags() & kAccPublic)) { return result; } else if (UNLIKELY(self->IsExceptionPending())) { @@ -258,7 +259,7 @@ static mirror::Field* GetPublicFieldRecursive( self->AssertPendingException(); return nullptr; } - result = GetPublicFieldRecursive(self, iface, name); + result = GetPublicFieldRecursive(self, iface, h_name.Get()); if (result != nullptr) { DCHECK(result->GetAccessFlags() & kAccPublic); return result; diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc index ddcaadefa..54b8afd1f 100644 --- a/runtime/native/java_lang_reflect_Constructor.cc +++ b/runtime/native/java_lang_reflect_Constructor.cc @@ -34,20 +34,38 @@ static jobject Constructor_getAnnotationNative(JNIEnv* env, jobject javaMethod, ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); - Handle 
klass(hs.NewHandle(soa.Decode(annotationType))); - return soa.AddLocalReference( - method->GetDexFile()->GetAnnotationForMethod(method, klass)); + if (method->IsProxyMethod()) { + return nullptr; + } else { + Handle klass(hs.NewHandle(soa.Decode(annotationType))); + return soa.AddLocalReference( + method->GetDexFile()->GetAnnotationForMethod(method, klass)); + } } static jobjectArray Constructor_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); - return soa.AddLocalReference(method->GetDexFile()->GetAnnotationsForMethod(method)); + if (method->IsProxyMethod()) { + mirror::Class* class_class = mirror::Class::GetJavaLangClass(); + mirror::Class* class_array_class = + Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class); + if (class_array_class == nullptr) { + return nullptr; + } + mirror::ObjectArray* empty_array = + mirror::ObjectArray::Alloc(soa.Self(), class_array_class, 0); + return soa.AddLocalReference(empty_array); + } else { + return soa.AddLocalReference( + method->GetDexFile()->GetAnnotationsForMethod(method)); + } } static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); - ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); + ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod) + ->GetInterfaceMethodIfProxy(sizeof(void*)); mirror::ObjectArray* result_array = method->GetDexFile()->GetExceptionTypesForMethod(method); if (result_array == nullptr) { @@ -69,7 +87,12 @@ static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMetho static jobjectArray Constructor_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) { ScopedFastNativeObjectAccess soa(env); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); - return 
soa.AddLocalReference(method->GetDexFile()->GetParameterAnnotations(method)); + if (method->IsProxyMethod()) { + return nullptr; + } else { + return soa.AddLocalReference( + method->GetDexFile()->GetParameterAnnotations(method)); + } } static jboolean Constructor_isAnnotationPresentNative(JNIEnv* env, jobject javaMethod, @@ -77,6 +100,10 @@ static jboolean Constructor_isAnnotationPresentNative(JNIEnv* env, jobject javaM ScopedFastNativeObjectAccess soa(env); StackHandleScope<1> hs(soa.Self()); ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); + if (method->IsProxyMethod()) { + // Proxies have no annotations. + return false; + } Handle klass(hs.NewHandle(soa.Decode(annotationType))); return method->GetDexFile()->IsMethodAnnotationPresent(method, klass); } diff --git a/runtime/oat.h b/runtime/oat.h index 543d99f2a..57675dc73 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -43,7 +43,7 @@ class PACKED(4) OatHeader { static constexpr const char* kNativeDebuggableKey = "native-debuggable"; static constexpr const char* kCompilerFilter = "compiler-filter"; static constexpr const char* kClassPathKey = "classpath"; - static constexpr const char* kBootClassPath = "bootclasspath"; + static constexpr const char* kBootClassPathKey = "bootclasspath"; static constexpr const char kTrueValue[] = "true"; static constexpr const char kFalseValue[] = "false"; diff --git a/runtime/oat_file.h b/runtime/oat_file.h index 11a9d76da..aa727ff45 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -48,6 +48,9 @@ class DummyOatFile; class OatFile { public: + // Special classpath that skips shared library check. + static constexpr const char* kSpecialSharedLibrary = "&"; + typedef art::OatDexFile OatDexFile; // Opens an oat file contained within the given elf file. 
This is always opened as @@ -370,6 +373,10 @@ class OatDexFile FINAL { return lookup_table_data_; } + const uint8_t* GetDexFilePointer() const { + return dex_file_pointer_; + } + ~OatDexFile(); private: diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index 3f95772b4..fba10ca01 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -30,6 +30,7 @@ #include "base/logging.h" #include "base/stringprintf.h" +#include "compiler_filter.h" #include "class_linker.h" #include "gc/heap.h" #include "gc/space/image_space.h" @@ -43,6 +44,24 @@ namespace art { +std::ostream& operator << (std::ostream& stream, const OatFileAssistant::OatStatus status) { + switch (status) { + case OatFileAssistant::kOatOutOfDate: + stream << "kOatOutOfDate"; + break; + case OatFileAssistant::kOatUpToDate: + stream << "kOatUpToDate"; + break; + case OatFileAssistant::kOatNeedsRelocation: + stream << "kOatNeedsRelocation"; + break; + default: + UNREACHABLE(); + } + + return stream; +} + OatFileAssistant::OatFileAssistant(const char* dex_location, const InstructionSet isa, bool profile_changed, @@ -179,11 +198,38 @@ OatFileAssistant::DexOptNeeded OatFileAssistant::GetDexOptNeeded(CompilerFilter: return HasOriginalDexFiles() ? kDex2OatNeeded : kNoDexOptNeeded; } +// Figure out the currently specified compile filter option in the runtime. +// Returns true on success, false if the compiler filter is invalid, in which +// case error_msg describes the problem. 
+static bool GetRuntimeCompilerFilterOption(CompilerFilter::Filter* filter, + std::string* error_msg) { + CHECK(filter != nullptr); + CHECK(error_msg != nullptr); + + *filter = CompilerFilter::kDefaultCompilerFilter; + for (StringPiece option : Runtime::Current()->GetCompilerOptions()) { + if (option.starts_with("--compiler-filter=")) { + const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data(); + if (!CompilerFilter::ParseCompilerFilter(compiler_filter_string, filter)) { + *error_msg = std::string("Unknown --compiler-filter value: ") + + std::string(compiler_filter_string); + return false; + } + } + } + return true; +} + OatFileAssistant::ResultOfAttemptToUpdate -OatFileAssistant::MakeUpToDate(CompilerFilter::Filter target, std::string* error_msg) { +OatFileAssistant::MakeUpToDate(std::string* error_msg) { + CompilerFilter::Filter target; + if (!GetRuntimeCompilerFilterOption(&target, error_msg)) { + return kUpdateNotAttempted; + } + switch (GetDexOptNeeded(target)) { case kNoDexOptNeeded: return kUpdateSucceeded; - case kDex2OatNeeded: return GenerateOatFile(target, error_msg); + case kDex2OatNeeded: return GenerateOatFile(error_msg); case kPatchOatNeeded: return RelocateOatFile(OdexFileName(), error_msg); case kSelfPatchOatNeeded: return RelocateOatFile(OatFileName(), error_msg); } @@ -350,6 +396,12 @@ bool OatFileAssistant::OdexFileIsUpToDate() { return cached_odex_file_is_up_to_date_; } +CompilerFilter::Filter OatFileAssistant::OdexFileCompilerFilter() { + const OatFile* odex_file = GetOdexFile(); + CHECK(odex_file != nullptr); + + return odex_file->GetCompilerFilter(); +} std::string OatFileAssistant::ArtFileName(const OatFile* oat_file) const { const std::string oat_file_location = oat_file->GetLocation(); // Replace extension with .art @@ -428,6 +480,13 @@ bool OatFileAssistant::OatFileIsUpToDate() { return cached_oat_file_is_up_to_date_; } +CompilerFilter::Filter OatFileAssistant::OatFileCompilerFilter() { + const OatFile* 
oat_file = GetOatFile(); + CHECK(oat_file != nullptr); + + return oat_file->GetCompilerFilter(); +} + OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& file) { // TODO: This could cause GivenOatFileIsOutOfDate to be called twice, which // is more work than we need to do. If performance becomes a concern, and @@ -634,7 +693,7 @@ OatFileAssistant::RelocateOatFile(const std::string* input_file, std::string* er } OatFileAssistant::ResultOfAttemptToUpdate -OatFileAssistant::GenerateOatFile(CompilerFilter::Filter target, std::string* error_msg) { +OatFileAssistant::GenerateOatFile(std::string* error_msg) { CHECK(error_msg != nullptr); Runtime* runtime = Runtime::Current(); @@ -678,7 +737,6 @@ OatFileAssistant::GenerateOatFile(CompilerFilter::Filter target, std::string* er args.push_back("--dex-file=" + dex_location_); args.push_back("--oat-fd=" + std::to_string(oat_file->Fd())); args.push_back("--oat-location=" + oat_file_name); - args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(target)); if (!Dex2Oat(args, error_msg)) { // Manually delete the file. 
This ensures there is no garbage left over if @@ -713,7 +771,11 @@ bool OatFileAssistant::Dex2Oat(const std::vector& args, argv.push_back("--runtime-arg"); argv.push_back("-classpath"); argv.push_back("--runtime-arg"); - argv.push_back(runtime->GetClassPathString()); + std::string class_path = runtime->GetClassPathString(); + if (class_path == "") { + class_path = OatFile::kSpecialSharedLibrary; + } + argv.push_back(class_path); if (runtime->IsDebuggable()) { argv.push_back("--debuggable"); } diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h index d3228deac..f48cdf343 100644 --- a/runtime/oat_file_assistant.h +++ b/runtime/oat_file_assistant.h @@ -19,6 +19,7 @@ #include #include +#include #include #include "arch/instruction_set.h" @@ -159,15 +160,12 @@ class OatFileAssistant { }; // Attempts to generate or relocate the oat file as needed to make it up to - // date with in a way that is at least as good as an oat file generated with - // the given compiler filter. - // Returns the result of attempting to update the code. + // date based on the current runtime and compiler options. // // If the result is not kUpdateSucceeded, the value of error_msg will be set // to a string describing why there was a failure or the update was not // attempted. error_msg must not be null. - ResultOfAttemptToUpdate MakeUpToDate(CompilerFilter::Filter target_compiler_filter, - std::string* error_msg); + ResultOfAttemptToUpdate MakeUpToDate(std::string* error_msg); // Returns an oat file that can be used for loading dex files. // Returns null if no suitable oat file was found. @@ -214,6 +212,9 @@ class OatFileAssistant { bool OdexFileIsOutOfDate(); bool OdexFileNeedsRelocation(); bool OdexFileIsUpToDate(); + // Must only be called if the associated odex file exists, i.e, if + // |OdexFileExists() == true|. + CompilerFilter::Filter OdexFileCompilerFilter(); // When the dex files is compiled on the target device, the oat file is the // result. 
The oat file will have been relocated to some @@ -230,6 +231,9 @@ class OatFileAssistant { bool OatFileIsOutOfDate(); bool OatFileNeedsRelocation(); bool OatFileIsUpToDate(); + // Must only be called if the associated oat file exists, i.e, if + // |OatFileExists() == true|. + CompilerFilter::Filter OatFileCompilerFilter(); // Return image file name. Does not cache since it relies on the oat file. std::string ArtFileName(const OatFile* oat_file) const; @@ -250,14 +254,15 @@ class OatFileAssistant { // attempted. error_msg must not be null. ResultOfAttemptToUpdate RelocateOatFile(const std::string* input_file, std::string* error_msg); - // Generate the oat file from the dex file using the given compiler filter. + // Generate the oat file from the dex file using the current runtime + // compiler options. // This does not check the current status before attempting to generate the // oat file. // // If the result is not kUpdateSucceeded, the value of error_msg will be set // to a string describing why there was a failure or the update was not // attempted. error_msg must not be null. - ResultOfAttemptToUpdate GenerateOatFile(CompilerFilter::Filter filter, std::string* error_msg); + ResultOfAttemptToUpdate GenerateOatFile(std::string* error_msg); // Executes dex2oat using the current runtime configuration overridden with // the given arguments. This does not check to see if dex2oat is enabled in @@ -438,6 +443,8 @@ class OatFileAssistant { DISALLOW_COPY_AND_ASSIGN(OatFileAssistant); }; +std::ostream& operator << (std::ostream& stream, const OatFileAssistant::OatStatus status); + } // namespace art #endif // ART_RUNTIME_OAT_FILE_ASSISTANT_H_ diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc index f50d1cb74..15a1aa4d1 100644 --- a/runtime/oat_file_assistant_test.cc +++ b/runtime/oat_file_assistant_test.cc @@ -453,8 +453,7 @@ TEST_F(OatFileAssistantTest, NoDexNoOat) { // Trying to make the oat file up to date should not fail or crash. 
std::string error_msg; - EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)); + EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, oat_file_assistant.MakeUpToDate(&error_msg)); // Trying to get the best oat file should fail, but not crash. std::unique_ptr oat_file = oat_file_assistant.GetBestOatFile(); @@ -705,8 +704,9 @@ TEST_F(OatFileAssistantTest, StrippedDexOdexNoOat) { // Make the oat file up to date. std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg; + oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg; EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); @@ -768,8 +768,9 @@ TEST_F(OatFileAssistantTest, StrippedDexOdexOat) { // Make the oat file up to date. std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg; + oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg; EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); @@ -825,8 +826,9 @@ TEST_F(OatFileAssistantTest, ResourceOnlyDex) { // Make the oat file up to date. This should have no effect. std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg; + oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg; EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); @@ -876,8 +878,9 @@ TEST_F(OatFileAssistantTest, SelfRelocation) { // Make the oat file up to date. 
std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg; + oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg; EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); @@ -920,8 +923,9 @@ TEST_F(OatFileAssistantTest, NoSelfRelocation) { // Make the oat file up to date. std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg; + oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg; EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); @@ -1100,8 +1104,9 @@ TEST_F(OatFileAssistantTest, LoadDexNoAlternateOat) { OatFileAssistant oat_file_assistant( dex_location.c_str(), oat_location.c_str(), kRuntimeISA, false, true); std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); ASSERT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg; + oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg; std::unique_ptr oat_file = oat_file_assistant.GetBestOatFile(); ASSERT_TRUE(oat_file.get() != nullptr); @@ -1131,8 +1136,9 @@ TEST_F(OatFileAssistantTest, LoadDexUnwriteableAlternateOat) { OatFileAssistant oat_file_assistant( dex_location.c_str(), oat_location.c_str(), kRuntimeISA, false, true); std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); ASSERT_EQ(OatFileAssistant::kUpdateNotAttempted, - oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)); + oat_file_assistant.MakeUpToDate(&error_msg)); std::unique_ptr oat_file = oat_file_assistant.GetBestOatFile(); ASSERT_TRUE(oat_file.get() == 
nullptr); @@ -1147,8 +1153,9 @@ TEST_F(OatFileAssistantTest, GenNoDex) { OatFileAssistant oat_file_assistant( dex_location.c_str(), oat_location.c_str(), kRuntimeISA, false, true); std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); EXPECT_EQ(OatFileAssistant::kUpdateNotAttempted, - oat_file_assistant.GenerateOatFile(CompilerFilter::kSpeed, &error_msg)); + oat_file_assistant.GenerateOatFile(&error_msg)); } // Turn an absolute path into a path relative to the current working @@ -1227,8 +1234,9 @@ TEST_F(OatFileAssistantTest, ShortDexLocation) { // Trying to make it up to date should have no effect. std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, - oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)); + oat_file_assistant.MakeUpToDate(&error_msg)); EXPECT_TRUE(error_msg.empty()); } @@ -1256,8 +1264,7 @@ TEST_F(OatFileAssistantTest, LongDexExtension) { class RaceGenerateTask : public Task { public: explicit RaceGenerateTask(const std::string& dex_location, const std::string& oat_location) - : dex_location_(dex_location), oat_location_(oat_location), - loaded_oat_file_(nullptr) + : dex_location_(dex_location), oat_location_(oat_location), loaded_oat_file_(nullptr) {} void Run(Thread* self ATTRIBUTE_UNUSED) { @@ -1368,6 +1375,34 @@ TEST_F(OatFileAssistantNoDex2OatTest, LoadMultiDexOdexNoOat) { EXPECT_EQ(2u, dex_files.size()); } +TEST_F(OatFileAssistantTest, RuntimeCompilerFilterOptionUsed) { + std::string dex_location = GetScratchDir() + "/RuntimeCompilerFilterOptionUsed.jar"; + Copy(GetDexSrc1(), dex_location); + + OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false, false); + + std::string error_msg; + Runtime::Current()->AddCompilerOption("--compiler-filter=interpret-only"); + EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, + oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg; + 
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, + oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly)); + EXPECT_EQ(OatFileAssistant::kDex2OatNeeded, + oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); + + Runtime::Current()->AddCompilerOption("--compiler-filter=speed"); + EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, + oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg; + EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, + oat_file_assistant.GetDexOptNeeded(CompilerFilter::kInterpretOnly)); + EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded, + oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed)); + + Runtime::Current()->AddCompilerOption("--compiler-filter=bogus"); + EXPECT_EQ(OatFileAssistant::kUpdateNotAttempted, + oat_file_assistant.MakeUpToDate(&error_msg)); +} + TEST(OatFileAssistantUtilsTest, DexFilenameToOdexFilename) { std::string error_msg; std::string odex_file; diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc index 9ab0072ea..0af6716af 100644 --- a/runtime/oat_file_manager.cc +++ b/runtime/oat_file_manager.cc @@ -36,16 +36,9 @@ namespace art { -// For b/21333911. -// Only enabled for debug builds to prevent bit rot. There are too many performance regressions for -// normal builds. -static constexpr bool kDuplicateClassesCheck = kIsDebugBuild; - // If true, then we attempt to load the application image if it exists. 
static constexpr bool kEnableAppImage = true; -CompilerFilter::Filter OatFileManager::filter_ = CompilerFilter::Filter::kSpeed; - const OatFile* OatFileManager::RegisterOatFile(std::unique_ptr oat_file) { WriterMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_); DCHECK(oat_file != nullptr); @@ -175,7 +168,7 @@ class DexFileAndClassPair : ValueObject { void Next() { ++current_class_index_; - cached_descriptor_ = GetClassDescriptor(dex_file_.get(), current_class_index_); + cached_descriptor_ = GetClassDescriptor(dex_file_, current_class_index_); } size_t GetCurrentClassIndex() const { @@ -187,7 +180,12 @@ class DexFileAndClassPair : ValueObject { } const DexFile* GetDexFile() const { - return dex_file_.get(); + return dex_file_; + } + + void DeleteDexFile() { + delete dex_file_; + dex_file_ = nullptr; } private: @@ -198,7 +196,7 @@ class DexFileAndClassPair : ValueObject { } const char* cached_descriptor_; - std::shared_ptr dex_file_; + const DexFile* dex_file_; size_t current_class_index_; bool from_loaded_oat_; // We only need to compare mismatches between what we load now // and what was loaded before. 
Any old duplicates must have been @@ -221,53 +219,299 @@ static void AddDexFilesFromOat(const OatFile* oat_file, } static void AddNext(/*inout*/DexFileAndClassPair* original, - /*inout*/std::priority_queue* heap) { + /*inout*/std::priority_queue* heap, + bool owning_dex_files) { if (original->DexFileHasMoreClasses()) { original->Next(); heap->push(std::move(*original)); + } else if (owning_dex_files) { + original->DeleteDexFile(); + } +} + +static void FreeDexFilesInHeap(std::priority_queue* heap, + bool owning_dex_files) { + if (owning_dex_files) { + while (!heap->empty()) { + delete heap->top().GetDexFile(); + heap->pop(); + } + } +} + +static void IterateOverJavaDexFile(mirror::Object* dex_file, + ArtField* const cookie_field, + std::function fn) + SHARED_REQUIRES(Locks::mutator_lock_) { + if (dex_file != nullptr) { + mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray(); + if (long_array == nullptr) { + // This should never happen so log a warning. + LOG(WARNING) << "Null DexFile::mCookie"; + return; + } + int32_t long_array_size = long_array->GetLength(); + // Start from 1 to skip the oat file. + for (int32_t j = 1; j < long_array_size; ++j) { + const DexFile* cp_dex_file = reinterpret_cast(static_cast( + long_array->GetWithoutChecks(j))); + if (!fn(cp_dex_file)) { + return; + } + } + } +} + +static void IterateOverPathClassLoader( + ScopedObjectAccessAlreadyRunnable& soa, + Handle class_loader, + MutableHandle> dex_elements, + std::function fn) SHARED_REQUIRES(Locks::mutator_lock_) { + // Handle this step. + // Handle as if this is the child PathClassLoader. + // The class loader is a PathClassLoader which inherits from BaseDexClassLoader. + // We need to get the DexPathList and loop through it. 
+ ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie); + ArtField* const dex_file_field = + soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); + mirror::Object* dex_path_list = + soa.DecodeField(WellKnownClasses::dalvik_system_PathClassLoader_pathList)-> + GetObject(class_loader.Get()); + if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) { + // DexPathList has an array dexElements of Elements[] which each contain a dex file. + mirror::Object* dex_elements_obj = + soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)-> + GetObject(dex_path_list); + // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look + // at the mCookie which is a DexFile vector. + if (dex_elements_obj != nullptr) { + dex_elements.Assign(dex_elements_obj->AsObjectArray()); + for (int32_t i = 0; i < dex_elements->GetLength(); ++i) { + mirror::Object* element = dex_elements->GetWithoutChecks(i); + if (element == nullptr) { + // Should never happen, fall back to java code to throw a NPE. + break; + } + mirror::Object* dex_file = dex_file_field->GetObject(element); + IterateOverJavaDexFile(dex_file, cookie_field, fn); + } + } + } +} + +static bool GetDexFilesFromClassLoader( + ScopedObjectAccessAlreadyRunnable& soa, + mirror::ClassLoader* class_loader, + std::priority_queue* queue) SHARED_REQUIRES(Locks::mutator_lock_) { + if (ClassLinker::IsBootClassLoader(soa, class_loader)) { + // The boot class loader. We don't load any of these files, as we know we compiled against + // them correctly. + return true; + } + + // Unsupported class-loader? 
+ if (class_loader->GetClass() != + soa.Decode(WellKnownClasses::dalvik_system_PathClassLoader)) { + VLOG(class_linker) << "Unsupported class-loader " << PrettyClass(class_loader->GetClass()); + return false; + } + + bool recursive_result = GetDexFilesFromClassLoader(soa, class_loader->GetParent(), queue); + if (!recursive_result) { + // Something wrong up the chain. + return false; + } + + // Collect all the dex files. + auto GetDexFilesFn = [&] (const DexFile* cp_dex_file) + SHARED_REQUIRES(Locks::mutator_lock_) { + if (cp_dex_file->NumClassDefs() > 0) { + queue->emplace(cp_dex_file, 0U, true); + } + return true; // Continue looking. + }; + + // Handle for dex-cache-element. + StackHandleScope<3> hs(soa.Self()); + MutableHandle> dex_elements( + hs.NewHandle>(nullptr)); + Handle h_class_loader(hs.NewHandle(class_loader)); + + IterateOverPathClassLoader(soa, h_class_loader, dex_elements, GetDexFilesFn); + + return true; +} + +static void GetDexFilesFromDexElementsArray( + ScopedObjectAccessAlreadyRunnable& soa, + Handle> dex_elements, + std::priority_queue* queue) SHARED_REQUIRES(Locks::mutator_lock_) { + if (dex_elements.Get() == nullptr) { + // Nothing to do. + return; + } + + ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie); + ArtField* const dex_file_field = + soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); + const mirror::Class* const element_class = soa.Decode( + WellKnownClasses::dalvik_system_DexPathList__Element); + const mirror::Class* const dexfile_class = soa.Decode( + WellKnownClasses::dalvik_system_DexFile); + + // Collect all the dex files. + auto GetDexFilesFn = [&] (const DexFile* cp_dex_file) + SHARED_REQUIRES(Locks::mutator_lock_) { + if (cp_dex_file != nullptr && cp_dex_file->NumClassDefs() > 0) { + queue->emplace(cp_dex_file, 0U, true); + } + return true; // Continue looking. 
+ }; + + for (int32_t i = 0; i < dex_elements->GetLength(); ++i) { + mirror::Object* element = dex_elements->GetWithoutChecks(i); + if (element == nullptr) { + continue; + } + + // We support this being dalvik.system.DexPathList$Element and dalvik.system.DexFile. + + mirror::Object* dex_file; + if (element->GetClass() == element_class) { + dex_file = dex_file_field->GetObject(element); + } else if (element->GetClass() == dexfile_class) { + dex_file = element; + } else { + LOG(WARNING) << "Unsupported element in dex_elements: " << PrettyClass(element->GetClass()); + continue; + } + + IterateOverJavaDexFile(dex_file, cookie_field, GetDexFilesFn); + } +} + +static bool AreSharedLibrariesOk(const std::string shared_libraries, + std::priority_queue& queue) { + if (shared_libraries.empty()) { + if (queue.empty()) { + // No shared libraries or oat files, as expected. + return true; + } + } else { + if (shared_libraries.compare(OatFile::kSpecialSharedLibrary) == 0) { + // If we find the special shared library, skip the shared libraries check. + return true; + } + // Shared libraries is a series of dex file paths and their checksums, each separated by '*'. + std::vector shared_libraries_split; + Split(shared_libraries, '*', &shared_libraries_split); + + size_t index = 0; + std::priority_queue temp = queue; + while (!temp.empty() && index < shared_libraries_split.size() - 1) { + DexFileAndClassPair pair(temp.top()); + const DexFile* dex_file = pair.GetDexFile(); + std::string dex_filename(dex_file->GetLocation()); + uint32_t dex_checksum = dex_file->GetLocationChecksum(); + if (dex_filename != shared_libraries_split[index] || + dex_checksum != std::stoul(shared_libraries_split[index + 1])) { + break; + } + temp.pop(); + index += 2; + } + + // Check is successful if it made it through the queue and all the shared libraries. + return temp.empty() && index == shared_libraries_split.size(); } + return false; } // Check for class-def collisions in dex files. 
// -// This works by maintaining a heap with one class from each dex file, sorted by the class -// descriptor. Then a dex-file/class pair is continually removed from the heap and compared +// This first walks the class loader chain, getting all the dex files from the class loader. If +// the class loader is null or one of the class loaders in the chain is unsupported, we collect +// dex files from all open non-boot oat files to be safe. +// +// This first checks whether the shared libraries are in the expected order and the oat files +// have the expected checksums. If so, we exit early. Otherwise, we do the collision check. +// +// The collision check works by maintaining a heap with one class from each dex file, sorted by the +// class descriptor. Then a dex-file/class pair is continually removed from the heap and compared // against the following top element. If the descriptor is the same, it is now checked whether // the two elements agree on whether their dex file was from an already-loaded oat-file or the // new oat file. Any disagreement indicates a collision. bool OatFileManager::HasCollisions(const OatFile* oat_file, + jobject class_loader, + jobjectArray dex_elements, std::string* error_msg /*out*/) const { DCHECK(oat_file != nullptr); DCHECK(error_msg != nullptr); - if (!kDuplicateClassesCheck) { - return false; + + std::priority_queue queue; + bool owning_dex_files = false; + + // Try to get dex files from the given class loader. If the class loader is null, or we do + // not support one of the class loaders in the chain, conservatively compare against all + // (non-boot) oat files. 
+ bool class_loader_ok = false; + { + ScopedObjectAccess soa(Thread::Current()); + StackHandleScope<2> hs(Thread::Current()); + Handle h_class_loader = + hs.NewHandle(soa.Decode(class_loader)); + Handle> h_dex_elements = + hs.NewHandle(soa.Decode*>(dex_elements)); + if (h_class_loader.Get() != nullptr && + GetDexFilesFromClassLoader(soa, h_class_loader.Get(), &queue)) { + class_loader_ok = true; + + // In this case, also take into account the dex_elements array, if given. We don't need to + // read it otherwise, as we'll compare against all open oat files anyways. + GetDexFilesFromDexElementsArray(soa, h_dex_elements, &queue); + } else if (h_class_loader.Get() != nullptr) { + VLOG(class_linker) << "Something unsupported with " + << PrettyClass(h_class_loader->GetClass()); + } } // Dex files are registered late - once a class is actually being loaded. We have to compare // against the open oat files. Take the oat_file_manager_lock_ that protects oat_files_ accesses. ReaderMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_); - std::priority_queue queue; + if (!class_loader_ok) { + // Add dex files from already loaded oat files, but skip boot. - // Add dex files from already loaded oat files, but skip boot. - std::vector boot_oat_files = GetBootOatFiles(); - // The same OatFile can be loaded multiple times at different addresses. In this case, we don't - // need to check both against each other since they would have resolved the same way at compile - // time. 
- std::unordered_set unique_locations; - for (const std::unique_ptr& loaded_oat_file : oat_files_) { - DCHECK_NE(loaded_oat_file.get(), oat_file); - const std::string& location = loaded_oat_file->GetLocation(); - if (std::find(boot_oat_files.begin(), boot_oat_files.end(), loaded_oat_file.get()) == - boot_oat_files.end() && location != oat_file->GetLocation() && - unique_locations.find(location) == unique_locations.end()) { - unique_locations.insert(location); - AddDexFilesFromOat(loaded_oat_file.get(), /*already_loaded*/true, &queue); + // Clean up the queue. + while (!queue.empty()) { + queue.pop(); + } + + // Anything we load now is something we own and must be released later. + owning_dex_files = true; + + std::vector boot_oat_files = GetBootOatFiles(); + // The same OatFile can be loaded multiple times at different addresses. In this case, we don't + // need to check both against each other since they would have resolved the same way at compile + // time. + std::unordered_set unique_locations; + for (const std::unique_ptr& loaded_oat_file : oat_files_) { + DCHECK_NE(loaded_oat_file.get(), oat_file); + const std::string& location = loaded_oat_file->GetLocation(); + if (std::find(boot_oat_files.begin(), boot_oat_files.end(), loaded_oat_file.get()) == + boot_oat_files.end() && location != oat_file->GetLocation() && + unique_locations.find(location) == unique_locations.end()) { + unique_locations.insert(location); + AddDexFilesFromOat(loaded_oat_file.get(), /*already_loaded*/true, &queue); + } } } - if (queue.empty()) { - // No other oat files, return early. + // Exit if shared libraries are ok. Do a full duplicate classes check otherwise. 
+ const std::string + shared_libraries(oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey)); + if (AreSharedLibrariesOk(shared_libraries, queue)) { + FreeDexFilesInHeap(&queue, owning_dex_files); return false; } @@ -292,16 +536,17 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file, compare_pop.GetCachedDescriptor(), compare_pop.GetDexFile()->GetLocation().c_str(), top.GetDexFile()->GetLocation().c_str()); + FreeDexFilesInHeap(&queue, owning_dex_files); return true; } queue.pop(); - AddNext(&top, &queue); + AddNext(&top, &queue, owning_dex_files); } else { // Something else. Done here. break; } } - AddNext(&compare_pop, &queue); + AddNext(&compare_pop, &queue, owning_dex_files); } return false; @@ -341,9 +586,10 @@ std::vector> OatFileManager::OpenDexFilesFromOat( const OatFile* source_oat_file = nullptr; - // Update the oat file on disk if we can. This may fail, but that's okay. - // Best effort is all that matters here. - switch (oat_file_assistant.MakeUpToDate(filter_, /*out*/ &error_msg)) { + // Update the oat file on disk if we can, based on the --compiler-filter + // option derived from the current runtime options. + // This may fail, but that's okay. Best effort is all that matters here. + switch (oat_file_assistant.MakeUpToDate(/*out*/ &error_msg)) { case OatFileAssistant::kUpdateFailed: LOG(WARNING) << error_msg; break; @@ -364,7 +610,8 @@ std::vector> OatFileManager::OpenDexFilesFromOat( if (oat_file != nullptr) { // Take the file only if it has no collisions, or we must take it because of preopting. - bool accept_oat_file = !HasCollisions(oat_file.get(), /*out*/ &error_msg); + bool accept_oat_file = + !HasCollisions(oat_file.get(), class_loader, dex_elements, /*out*/ &error_msg); if (!accept_oat_file) { // Failed the collision check. Print warning. 
if (Runtime::Current()->IsDexFileFallbackEnabled()) { diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h index f98102e84..a1d1275e6 100644 --- a/runtime/oat_file_manager.h +++ b/runtime/oat_file_manager.h @@ -25,7 +25,6 @@ #include "base/macros.h" #include "base/mutex.h" -#include "compiler_filter.h" #include "jni.h" namespace art { @@ -116,14 +115,17 @@ class OatFileManager { void DumpForSigQuit(std::ostream& os); - static void SetCompilerFilter(CompilerFilter::Filter filter) { - filter_ = filter; - } - private: - // Check for duplicate class definitions of the given oat file against all open oat files. + // Check that the shared libraries in the given oat file match those in the given class loader and + // dex elements. If the class loader is null or we do not support one of the class loaders in the + // chain, compare against all non-boot oat files instead. If the shared libraries are not ok, + // check for duplicate class definitions of the given oat file against the oat files (either from + // the class loader and dex elements if possible or all non-boot oat files otherwise). // Return true if there are any class definition collisions in the oat_file. - bool HasCollisions(const OatFile* oat_file, /*out*/std::string* error_msg) const + bool HasCollisions(const OatFile* oat_file, + jobject class_loader, + jobjectArray dex_elements, + /*out*/ std::string* error_msg) const REQUIRES(!Locks::oat_file_manager_lock_); const OatFile* FindOpenedOatFileFromOatLocationLocked(const std::string& oat_location) const @@ -133,9 +135,6 @@ class OatFileManager { std::unordered_map oat_file_count_ GUARDED_BY(Locks::oat_file_count_lock_); bool have_non_pic_oat_file_; - // The compiler filter used for oat files loaded by the oat file manager. 
- static CompilerFilter::Filter filter_; - DISALLOW_COPY_AND_ASSIGN(OatFileManager); }; diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index c8d429158..eac5b43ff 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -153,7 +153,7 @@ std::unique_ptr ParsedOptions::MakeParser(bool ignore_unrecognize .Define("-Xusejit:_") .WithType() .WithValueMap({{"false", false}, {"true", true}}) - .IntoKey(M::UseJIT) + .IntoKey(M::UseJitCompilation) .Define("-Xjitinitialsize:_") .WithType() .IntoKey(M::JITCodeCacheInitialCapacity) @@ -172,6 +172,9 @@ std::unique_ptr ParsedOptions::MakeParser(bool ignore_unrecognize .Define("-Xjitprithreadweight:_") .WithType() .IntoKey(M::JITPriorityThreadWeight) + .Define("-Xjittransitionweight:_") + .WithType() + .IntoKey(M::JITInvokeTransitionWeight) .Define("-Xjitsaveprofilinginfo") .WithValue(true) .IntoKey(M::JITSaveProfilingInfo) @@ -289,9 +292,6 @@ std::unique_ptr ParsedOptions::MakeParser(bool ignore_unrecognize .IntoKey(M::Experimental) .Define("-Xforce-nb-testing") .IntoKey(M::ForceNativeBridge) - .Define("-XOatFileManagerCompilerFilter:_") - .WithType() - .IntoKey(M::OatFileManagerCompilerFilter) .Ignore({ "-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa", "-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_", @@ -471,6 +471,11 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options, LOG(INFO) << "setting boot class path to " << *args.Get(M::BootClassPath); } + if (args.GetOrDefault(M::UseJitCompilation) && args.GetOrDefault(M::Interpret)) { + Usage("-Xusejit:true and -Xint cannot be specified together"); + Exit(0); + } + // Set a default boot class path if we didn't get an explicit one via command line. 
if (getenv("BOOTCLASSPATH") != nullptr) { args.SetIfMissing(M::BootClassPath, std::string(getenv("BOOTCLASSPATH"))); diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc index c7ccee212..1dea562b5 100644 --- a/runtime/quick/inline_method_analyser.cc +++ b/runtime/quick/inline_method_analyser.cc @@ -434,7 +434,7 @@ static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) == bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* result) { DCHECK(verifier != nullptr); - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { DCHECK_EQ(verifier->CanLoadClasses(), result != nullptr); } diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index a785ecba3..a3e1f0020 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -509,7 +509,7 @@ void QuickExceptionHandler::DeoptimizeSingleFrame() { // Compiled code made an explicit deoptimization. ArtMethod* deopt_method = visitor.GetSingleFrameDeoptMethod(); DCHECK(deopt_method != nullptr); - if (Runtime::Current()->UseJit()) { + if (Runtime::Current()->UseJitCompilation()) { Runtime::Current()->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor( deopt_method, visitor.GetSingleFrameDeoptQuickMethodHeader()); } else { @@ -611,7 +611,7 @@ void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) { // Prints out methods with their type of frame. 
class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor { public: - DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false) + explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false) SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), show_details_(show_details) {} diff --git a/runtime/runtime.cc b/runtime/runtime.cc index a4d31ef12..63976d0b1 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -60,7 +60,6 @@ #include "base/unix_file/fd_file.h" #include "class_linker-inl.h" #include "compiler_callbacks.h" -#include "compiler_filter.h" #include "debugger.h" #include "elf_file.h" #include "entrypoints/runtime_asm_entrypoints.h" @@ -558,23 +557,34 @@ bool Runtime::Start() { started_ = true; - if (jit_options_->UseJIT()) { + // Create the JIT either if we have to use JIT compilation or save profiling info. + // TODO(calin): We use the JIT class as a proxy for JIT compilation and for + // recoding profiles. Maybe we should consider changing the name to be more clear it's + // not only about compiling. b/28295073. + if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) { std::string error_msg; if (!IsZygote()) { // If we are the zygote then we need to wait until after forking to create the code cache // due to SELinux restrictions on r/w/x memory regions. CreateJit(); - } else if (!jit::Jit::LoadCompilerLibrary(&error_msg)) { - // Try to load compiler pre zygote to reduce PSS. b/27744947 - LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg; + } else if (jit_options_->UseJitCompilation()) { + if (!jit::Jit::LoadCompilerLibrary(&error_msg)) { + // Try to load compiler pre zygote to reduce PSS. 
b/27744947 + LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg; + } } } if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) { ScopedObjectAccess soa(self); - StackHandleScope<1> hs(soa.Self()); - auto klass(hs.NewHandle(mirror::Class::GetJavaLangClass())); - class_linker_->EnsureInitialized(soa.Self(), klass, true, true); + StackHandleScope<2> hs(soa.Self()); + + auto class_class(hs.NewHandle(mirror::Class::GetJavaLangClass())); + auto field_class(hs.NewHandle(mirror::Field::StaticClass())); + + class_linker_->EnsureInitialized(soa.Self(), class_class, true, true); + // Field class is needed for register_java_net_InetAddress in libcore, b/28153851. + class_linker_->EnsureInitialized(soa.Self(), field_class, true, true); } // InitNativeMethods needs to be after started_ so that the classes @@ -713,7 +723,11 @@ void Runtime::InitNonZygoteOrPostFork( // before fork aren't attributed to an app. heap_->ResetGcPerformanceInfo(); - if (!is_system_server && !safe_mode_ && jit_options_->UseJIT() && jit_.get() == nullptr) { + + if (!is_system_server && + !safe_mode_ && + (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) && + jit_.get() == nullptr) { // Note that when running ART standalone (not zygote, nor zygote fork), // the jit may have already been created. CreateJit(); @@ -842,7 +856,7 @@ static bool OpenDexFilesFromImage(const std::string& image_location, if (index == 0) { // First file. See if this is a multi-image environment, and if so, enqueue the other images. 
const OatHeader& boot_oat_header = oat_file->GetOatHeader(); - const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPath); + const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey); if (boot_cp != nullptr) { gc::space::ImageSpace::CreateMultiImageLocations(image_locations[0], boot_cp, @@ -957,16 +971,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental); is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode); - { - CompilerFilter::Filter filter; - std::string filter_str = runtime_options.GetOrDefault(Opt::OatFileManagerCompilerFilter); - if (!CompilerFilter::ParseCompilerFilter(filter_str.c_str(), &filter)) { - LOG(ERROR) << "Cannot parse compiler filter " << filter_str; - return false; - } - OatFileManager::SetCompilerFilter(filter); - } - XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption); heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize), runtime_options.GetOrDefault(Opt::HeapGrowthLimit), @@ -1016,7 +1020,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { // this case. // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns // null and we don't create the jit. - jit_options_->SetUseJIT(false); + jit_options_->SetUseJitCompilation(false); + jit_options_->SetSaveProfilingInfo(false); } // Allocate a global table of boxed lambda objects <-> closures. 
@@ -1613,18 +1618,19 @@ void Runtime::VisitImageRoots(RootVisitor* visitor) { } } -static ImtConflictTable::Entry empty_entry = { nullptr, nullptr }; - ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) { - auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod(linear_alloc); + ClassLinker* const class_linker = GetClassLinker(); + ArtMethod* method = class_linker->CreateRuntimeMethod(linear_alloc); // When compiling, the code pointer will get set later when the image is loaded. + const size_t pointer_size = GetInstructionSetPointerSize(instruction_set_); if (IsAotCompiler()) { - size_t pointer_size = GetInstructionSetPointerSize(instruction_set_); method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size); } else { method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub()); - method->SetImtConflictTable(reinterpret_cast(&empty_entry)); } + // Create empty conflict table. + method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc), + pointer_size); return method; } @@ -1632,9 +1638,6 @@ void Runtime::SetImtConflictMethod(ArtMethod* method) { CHECK(method != nullptr); CHECK(method->IsRuntimeMethod()); imt_conflict_method_ = method; - if (!IsAotCompiler()) { - method->SetImtConflictTable(reinterpret_cast(&empty_entry)); - } } ArtMethod* Runtime::CreateResolutionMethod() { @@ -1915,9 +1918,8 @@ void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vectorIsForcedInterpretOnly()) { - // Don't create JIT if forced interpret only. 
- return; + if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) { + DCHECK(!jit_options_->UseJitCompilation()); } std::string error_msg; jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg)); @@ -1944,8 +1946,20 @@ void Runtime::SetImtUnimplementedMethod(ArtMethod* method) { CHECK(method != nullptr); CHECK(method->IsRuntimeMethod()); imt_unimplemented_method_ = method; - if (!IsAotCompiler()) { - method->SetImtConflictTable(reinterpret_cast(&empty_entry)); +} + +void Runtime::FixupConflictTables() { + // We can only do this after the class linker is created. + const size_t pointer_size = GetClassLinker()->GetImagePointerSize(); + if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) { + imt_unimplemented_method_->SetImtConflictTable( + ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size), + pointer_size); + } + if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) { + imt_conflict_method_->SetImtConflictTable( + ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size), + pointer_size); } } @@ -1981,4 +1995,18 @@ void Runtime::UpdateProcessState(ProcessState process_state) { GetHeap()->UpdateProcessState(old_process_state, process_state); } +void Runtime::RegisterSensitiveThread() const { + Thread::SetJitSensitiveThread(); +} + +// Returns true if JIT compilations are enabled. GetJit() will be not null in this case. +bool Runtime::UseJitCompilation() const { + return (jit_ != nullptr) && jit_->UseJitCompilation(); +} + +// Returns true if profile saving is enabled. GetJit() will be not null in this case. 
+bool Runtime::SaveProfileInfo() const { + return (jit_ != nullptr) && jit_->SaveProfilingInfo(); +} + } // namespace art diff --git a/runtime/runtime.h b/runtime/runtime.h index ae25dd1c6..1394462fd 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -127,7 +127,7 @@ class Runtime { // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently. bool IsAotCompiler() const { - return !UseJit() && IsCompiler(); + return !UseJitCompilation() && IsCompiler(); } // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT. @@ -383,6 +383,7 @@ class Runtime { return imt_conflict_method_ != nullptr; } + void FixupConflictTables(); void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); @@ -451,9 +452,11 @@ class Runtime { jit::Jit* GetJit() { return jit_.get(); } - bool UseJit() const { - return jit_.get() != nullptr; - } + + // Returns true if JIT compilations are enabled. GetJit() will be not null in this case. + bool UseJitCompilation() const; + // Returns true if profile saving is enabled. GetJit() will be not null in this case. 
+ bool SaveProfileInfo() const; void PreZygoteFork(); bool InitZygote(); @@ -635,6 +638,8 @@ class Runtime { return process_state_ == kProcessStateJankPerceptible; } + void RegisterSensitiveThread() const; + void SetZygoteNoThreadSection(bool val) { zygote_no_threads_ = val; } diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def index 6433c3352..635ff5169 100644 --- a/runtime/runtime_options.def +++ b/runtime/runtime_options.def @@ -66,12 +66,13 @@ RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint) RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode) RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier)) RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true) -RUNTIME_OPTIONS_KEY (bool, UseJIT, false) +RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, false) RUNTIME_OPTIONS_KEY (bool, DumpNativeStackOnSigQuit, true) RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold) RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold) RUNTIME_OPTIONS_KEY (unsigned int, JITOsrThreshold) RUNTIME_OPTIONS_KEY (unsigned int, JITPriorityThreadWeight) +RUNTIME_OPTIONS_KEY (unsigned int, JITInvokeTransitionWeight) RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity) RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity) RUNTIME_OPTIONS_KEY (bool, JITSaveProfilingInfo, false) @@ -128,11 +129,12 @@ RUNTIME_OPTIONS_KEY (CompilerCallbacks*, CompilerCallbacksPtr) // TDOO: make u RUNTIME_OPTIONS_KEY (bool (*)(), HookIsSensitiveThread) RUNTIME_OPTIONS_KEY (int32_t (*)(FILE* stream, const char* format, va_list ap), \ HookVfprintf, vfprintf) +// Use _exit instead of exit so that we won't get DCHECK failures in global data +// destructors. b/28106055. RUNTIME_OPTIONS_KEY (void (*)(int32_t status), \ - HookExit, exit) + HookExit, _exit) // We don't call abort(3) by default; see // Runtime::Abort. 
RUNTIME_OPTIONS_KEY (void (*)(), HookAbort, nullptr) -RUNTIME_OPTIONS_KEY (std::string, OatFileManagerCompilerFilter, "speed") #undef RUNTIME_OPTIONS_KEY diff --git a/runtime/stack.cc b/runtime/stack.cc index 56ef5aaa9..a5ca527aa 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -637,8 +637,8 @@ static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc) // If we are the JIT then we may have just compiled the method after the // IsQuickToInterpreterBridge check. - jit::Jit* const jit = Runtime::Current()->GetJit(); - if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) { + Runtime* runtime = Runtime::Current(); + if (runtime->UseJitCompilation() && runtime->GetJit()->GetCodeCache()->ContainsPc(code)) { return; } @@ -678,8 +678,10 @@ void StackVisitor::SanityCheckFrame() const { if (space->IsImageSpace()) { auto* image_space = space->AsImageSpace(); const auto& header = image_space->GetImageHeader(); - const auto* methods = &header.GetMethodsSection(); - if (methods->Contains(reinterpret_cast(method) - image_space->Begin())) { + const ImageSection& methods = header.GetMethodsSection(); + const ImageSection& runtime_methods = header.GetRuntimeMethodsSection(); + const size_t offset = reinterpret_cast(method) - image_space->Begin(); + if (methods.Contains(offset) || runtime_methods.Contains(offset)) { in_image = true; break; } @@ -948,7 +950,7 @@ int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item, } } -void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) { +void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) { if (obj == nullptr) { return; } @@ -965,7 +967,7 @@ void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) { monitors_->push_back(obj); } -void LockCountData::RemoveMonitorInternal(Thread* self, const mirror::Object* obj) { +void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) { if (obj == nullptr) { return; } @@ -998,7 +1000,7 
@@ void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALY obj->MonitorExit(self); } -bool LockCountData::CheckAllMonitorsReleasedInternal(Thread* self) { +bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) { DCHECK(self != nullptr); if (monitors_ != nullptr) { if (!monitors_->empty()) { diff --git a/runtime/stack.h b/runtime/stack.h index 7301184a9..e77ab4647 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -80,39 +80,18 @@ class LockCountData { public: // Add the given object to the list of monitors, that is, objects that have been locked. This // will not throw (but be skipped if there is an exception pending on entry). - template - void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { - DCHECK(self != nullptr); - if (!kLockCounting) { - return; - } - AddMonitorInternal(self, obj); - } + void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); // Try to remove the given object from the monitor list, indicating an unlock operation. // This will throw an IllegalMonitorStateException (clearing any already pending exception), in // case that there wasn't a lock recorded for the object. - template void RemoveMonitorOrThrow(Thread* self, - const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { - DCHECK(self != nullptr); - if (!kLockCounting) { - return; - } - RemoveMonitorInternal(self, obj); - } + const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); // Check whether all acquired monitors have been released. This will potentially throw an // IllegalMonitorStateException, clearing any already pending exception. Returns true if the // check shows that everything is OK wrt/ lock counting, false otherwise. 
- template - bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) { - DCHECK(self != nullptr); - if (!kLockCounting) { - return true; - } - return CheckAllMonitorsReleasedInternal(self); - } + bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); template void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) { @@ -125,12 +104,6 @@ class LockCountData { } private: - // Internal implementations. - void AddMonitorInternal(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); - void RemoveMonitorInternal(Thread* self, const mirror::Object* obj) - SHARED_REQUIRES(Locks::mutator_lock_); - bool CheckAllMonitorsReleasedInternal(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); - // Stores references to the locked-on objects. As noted, this should be visited during thread // marking. std::unique_ptr> monitors_; diff --git a/runtime/thread.cc b/runtime/thread.cc index 7922b6096..424894439 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -89,6 +89,7 @@ pthread_key_t Thread::pthread_key_self_; ConditionVariable* Thread::resume_cond_ = nullptr; const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA); bool (*Thread::is_sensitive_thread_hook_)() = nullptr; +Thread* Thread::jit_sensitive_thread_ = nullptr; static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild; @@ -512,14 +513,20 @@ static size_t FixStackSize(size_t stack_size) { return stack_size; } +// Return the nearest page-aligned address below the current stack top. +NO_INLINE +static uint8_t* FindStackTop() { + return reinterpret_cast( + AlignDown(__builtin_frame_address(0), kPageSize)); +} + // Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack // overflow is detected. It is located right below the stack_begin_. 
ATTRIBUTE_NO_SANITIZE_ADDRESS void Thread::InstallImplicitProtection() { uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; - uint8_t* stack_himem = tlsPtr_.stack_end; - uint8_t* stack_top = reinterpret_cast(reinterpret_cast(&stack_himem) & - ~(kPageSize - 1)); // Page containing current top of stack. + // Page containing current top of stack. + uint8_t* stack_top = FindStackTop(); // Try to directly protect the stack. VLOG(threads) << "installing stack protected region at " << std::hex << @@ -739,7 +746,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g { MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_); if (runtime->IsShuttingDownLocked()) { - LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name; + LOG(WARNING) << "Thread attaching while runtime is shutting down: " << thread_name; return nullptr; } else { Runtime::Current()->StartThreadBirth(); @@ -931,8 +938,7 @@ bool Thread::InitStackHwm() { } // Sanity check. 
- int stack_variable; - CHECK_GT(&stack_variable, reinterpret_cast(tlsPtr_.stack_end)); + CHECK_GT(FindStackTop(), reinterpret_cast(tlsPtr_.stack_end)); return true; } @@ -2763,7 +2769,7 @@ class ReferenceMapVisitor : public StackVisitor { VisitDeclaringClass(m); // Process register map (which native and runtime methods don't have) - if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) { + if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) { const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); DCHECK(method_header->IsOptimized()); auto* vreg_base = reinterpret_cast*>( @@ -3010,7 +3016,6 @@ size_t Thread::NumberOfHeldMutexes() const { return count; } - void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { DCHECK_EQ(GetException(), Thread::GetDeoptimizationException()); ClearException(); @@ -3031,4 +3036,11 @@ void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result); } +void Thread::SetException(mirror::Throwable* new_exception) { + CHECK(new_exception != nullptr); + // TODO: DCHECK(!IsExceptionPending()); + tlsPtr_.exception = new_exception; + // LOG(ERROR) << new_exception->Dump(); +} + } // namespace art diff --git a/runtime/thread.h b/runtime/thread.h index ed42e462a..582a0cdbd 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -363,12 +363,7 @@ class Thread { void AssertNoPendingException() const; void AssertNoPendingExceptionForNewException(const char* msg) const; - void SetException(mirror::Throwable* new_exception) - SHARED_REQUIRES(Locks::mutator_lock_) { - CHECK(new_exception != nullptr); - // TODO: DCHECK(!IsExceptionPending()); - tlsPtr_.exception = new_exception; - } + void SetException(mirror::Throwable* new_exception) SHARED_REQUIRES(Locks::mutator_lock_); void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) { tlsPtr_.exception = nullptr; @@ -1098,6 
+1093,12 @@ class Thread { return debug_disallow_read_barrier_; } + // Returns true if the current thread is the jit sensitive thread. + bool IsJitSensitiveThread() const { + return this == jit_sensitive_thread_; + } + + // Returns true if StrictMode events are traced for the current thread. static bool IsSensitiveThread() { if (is_sensitive_thread_hook_ != nullptr) { return (*is_sensitive_thread_hook_)(); @@ -1180,6 +1181,16 @@ class Thread { ALWAYS_INLINE void PassActiveSuspendBarriers() REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_); + // Registers the current thread as the jit sensitive thread. Should be called just once. + static void SetJitSensitiveThread() { + if (jit_sensitive_thread_ == nullptr) { + jit_sensitive_thread_ = Thread::Current(); + } else { + LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:" + << Thread::Current()->GetTid(); + } + } + static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) { is_sensitive_thread_hook_ = is_sensitive_thread_hook; } @@ -1229,6 +1240,8 @@ class Thread { // Hook passed by framework which returns true // when StrictMode events are traced for the current thread. static bool (*is_sensitive_thread_hook_)(); + // Stores the jit sensitive thread (which for now is the UI thread). + static Thread* jit_sensitive_thread_; /***********************************************************************************************/ // Thread local storage. 
Fields are grouped by size to enable 32 <-> 64 searching to account for diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index 8802e6243..2b96328f8 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -401,8 +401,13 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self, method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother); } } - if (method != nullptr && verifier.HasInstructionThatWillThrow()) { - method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother); + if (method != nullptr) { + if (verifier.HasInstructionThatWillThrow()) { + method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother); + } + if ((verifier.encountered_failure_types_ & VerifyError::VERIFY_ERROR_LOCKING) != 0) { + method->SetAccessFlags(method->GetAccessFlags() | kAccMustCountLocks); + } } } else { // Bad method data. @@ -4096,8 +4101,8 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs( << " to super " << PrettyMethod(res_method); return nullptr; } - mirror::Class* super_klass = super.GetClass(); - if (res_method->GetMethodIndex() >= super_klass->GetVTableLength()) { + if (!reference_class->IsAssignableFrom(GetDeclaringClass().GetClass()) || + (res_method->GetMethodIndex() >= super.GetClass()->GetVTableLength())) { Fail(VERIFY_ERROR_NO_METHOD) << "invalid invoke-super from " << PrettyMethod(dex_method_idx_, *dex_file_) << " to super " << super diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc index 2bdf8d1e7..8619ff7f3 100644 --- a/test/004-JniTest/jni_test.cc +++ b/test/004-JniTest/jni_test.cc @@ -14,23 +14,22 @@ * limitations under the License. 
*/ -#include #include #include #include #include +#include "art_method-inl.h" +#include "base/logging.h" #include "jni.h" -#if defined(NDEBUG) -#error test code compiled without NDEBUG -#endif +namespace art { static JavaVM* jvm = nullptr; extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void*) { - assert(vm != nullptr); - assert(jvm == nullptr); + CHECK(vm != nullptr); + CHECK(jvm == nullptr); jvm = vm; std::cout << "JNI_OnLoad called" << std::endl; return JNI_VERSION_1_6; @@ -39,24 +38,24 @@ extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void*) { extern "C" JNIEXPORT void JNI_OnUnload(JavaVM*, void*) { // std::cout since LOG(INFO) adds extra stuff like pid. std::cout << "JNI_OnUnload called" << std::endl; - // Clear jvm for assert in test 004-JniTest. + // Clear jvm for CHECK in test 004-JniTest. jvm = nullptr; } static void* AttachHelper(void* arg) { - assert(jvm != nullptr); + CHECK(jvm != nullptr); JNIEnv* env = nullptr; JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, nullptr }; int attach_result = jvm->AttachCurrentThread(&env, &args); - assert(attach_result == 0); + CHECK_EQ(attach_result, 0); typedef void (*Fn)(JNIEnv*); Fn fn = reinterpret_cast(arg); fn(env); int detach_result = jvm->DetachCurrentThread(); - assert(detach_result == 0); + CHECK_EQ(detach_result, 0); return nullptr; } @@ -64,19 +63,19 @@ static void PthreadHelper(void (*fn)(JNIEnv*)) { pthread_t pthread; int pthread_create_result = pthread_create(&pthread, nullptr, AttachHelper, reinterpret_cast(fn)); - assert(pthread_create_result == 0); + CHECK_EQ(pthread_create_result, 0); int pthread_join_result = pthread_join(pthread, nullptr); - assert(pthread_join_result == 0); + CHECK_EQ(pthread_join_result, 0); } static void testFindClassOnAttachedNativeThread(JNIEnv* env) { jclass clazz = env->FindClass("Main"); - assert(clazz != nullptr); - assert(!env->ExceptionCheck()); + CHECK(clazz != nullptr); + CHECK(!env->ExceptionCheck()); jobjectArray array = env->NewObjectArray(0, clazz, 
nullptr); - assert(array != nullptr); - assert(!env->ExceptionCheck()); + CHECK(array != nullptr); + CHECK(!env->ExceptionCheck()); } // http://b/10994325 @@ -86,12 +85,12 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testFindClassOnAttachedNativeThread( static void testFindFieldOnAttachedNativeThread(JNIEnv* env) { jclass clazz = env->FindClass("Main"); - assert(clazz != nullptr); - assert(!env->ExceptionCheck()); + CHECK(clazz != nullptr); + CHECK(!env->ExceptionCheck()); jfieldID field = env->GetStaticFieldID(clazz, "testFindFieldOnAttachedNativeThreadField", "Z"); - assert(field != nullptr); - assert(!env->ExceptionCheck()); + CHECK(field != nullptr); + CHECK(!env->ExceptionCheck()); env->SetStaticBooleanField(clazz, field, JNI_TRUE); } @@ -103,38 +102,38 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testFindFieldOnAttachedNativeThreadN static void testReflectFieldGetFromAttachedNativeThread(JNIEnv* env) { jclass clazz = env->FindClass("Main"); - assert(clazz != nullptr); - assert(!env->ExceptionCheck()); + CHECK(clazz != nullptr); + CHECK(!env->ExceptionCheck()); jclass class_clazz = env->FindClass("java/lang/Class"); - assert(class_clazz != nullptr); - assert(!env->ExceptionCheck()); + CHECK(class_clazz != nullptr); + CHECK(!env->ExceptionCheck()); jmethodID getFieldMetodId = env->GetMethodID(class_clazz, "getField", "(Ljava/lang/String;)Ljava/lang/reflect/Field;"); - assert(getFieldMetodId != nullptr); - assert(!env->ExceptionCheck()); + CHECK(getFieldMetodId != nullptr); + CHECK(!env->ExceptionCheck()); jstring field_name = env->NewStringUTF("testReflectFieldGetFromAttachedNativeThreadField"); - assert(field_name != nullptr); - assert(!env->ExceptionCheck()); + CHECK(field_name != nullptr); + CHECK(!env->ExceptionCheck()); jobject field = env->CallObjectMethod(clazz, getFieldMetodId, field_name); - assert(field != nullptr); - assert(!env->ExceptionCheck()); + CHECK(field != nullptr); + CHECK(!env->ExceptionCheck()); jclass field_clazz = 
env->FindClass("java/lang/reflect/Field"); - assert(field_clazz != nullptr); - assert(!env->ExceptionCheck()); + CHECK(field_clazz != nullptr); + CHECK(!env->ExceptionCheck()); jmethodID getBooleanMetodId = env->GetMethodID(field_clazz, "getBoolean", "(Ljava/lang/Object;)Z"); - assert(getBooleanMetodId != nullptr); - assert(!env->ExceptionCheck()); + CHECK(getBooleanMetodId != nullptr); + CHECK(!env->ExceptionCheck()); jboolean value = env->CallBooleanMethod(field, getBooleanMetodId, /* ignored */ clazz); - assert(value == false); - assert(!env->ExceptionCheck()); + CHECK(value == false); + CHECK(!env->ExceptionCheck()); } // http://b/15539150 @@ -148,22 +147,22 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testReflectFieldGetFromAttachedNativ extern "C" JNIEXPORT void JNICALL Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env, jclass) { jclass super_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SuperClass"); - assert(super_class != nullptr); + CHECK(super_class != nullptr); jmethodID execute = env->GetStaticMethodID(super_class, "execute", "()V"); - assert(execute != nullptr); + CHECK(execute != nullptr); jclass sub_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SubClass"); - assert(sub_class != nullptr); + CHECK(sub_class != nullptr); env->CallStaticVoidMethod(sub_class, execute); } extern "C" JNIEXPORT jobject JNICALL Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass) { jclass abstract_class = env->FindClass("Main$testGetMirandaMethod_MirandaAbstract"); - assert(abstract_class != nullptr); + CHECK(abstract_class != nullptr); jmethodID miranda_method = env->GetMethodID(abstract_class, "inInterface", "()Z"); - assert(miranda_method != nullptr); + CHECK(miranda_method != nullptr); return env->ToReflectedMethod(abstract_class, miranda_method, JNI_FALSE); } @@ -171,11 +170,11 @@ extern "C" JNIEXPORT jobject JNICALL Java_Main_testGetMirandaMethodNative(JNIEnv extern "C" void JNICALL 
Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass) { std::vector buffer(1); jobject byte_buffer = env->NewDirectByteBuffer(&buffer[0], 0); - assert(byte_buffer != nullptr); - assert(!env->ExceptionCheck()); + CHECK(byte_buffer != nullptr); + CHECK(!env->ExceptionCheck()); - assert(env->GetDirectBufferAddress(byte_buffer) == &buffer[0]); - assert(env->GetDirectBufferCapacity(byte_buffer) == 0); + CHECK_EQ(env->GetDirectBufferAddress(byte_buffer), &buffer[0]); + CHECK_EQ(env->GetDirectBufferCapacity(byte_buffer), 0); } constexpr size_t kByteReturnSize = 7; @@ -185,18 +184,18 @@ extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv*, jclass, jbyte b1, jbyte b jbyte b3, jbyte b4, jbyte b5, jbyte b6, jbyte b7, jbyte b8, jbyte b9, jbyte b10) { // We use b1 to drive the output. - assert(b2 == 2); - assert(b3 == -3); - assert(b4 == 4); - assert(b5 == -5); - assert(b6 == 6); - assert(b7 == -7); - assert(b8 == 8); - assert(b9 == -9); - assert(b10 == 10); - - assert(0 <= b1); - assert(b1 < static_cast(kByteReturnSize)); + CHECK_EQ(b2, 2); + CHECK_EQ(b3, -3); + CHECK_EQ(b4, 4); + CHECK_EQ(b5, -5); + CHECK_EQ(b6, 6); + CHECK_EQ(b7, -7); + CHECK_EQ(b8, 8); + CHECK_EQ(b9, -9); + CHECK_EQ(b10, 10); + + CHECK_LE(0, b1); + CHECK_LT(b1, static_cast(kByteReturnSize)); return byte_returns[b1]; } @@ -210,18 +209,18 @@ extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv*, jclass, jshort s1, jsho jshort s3, jshort s4, jshort s5, jshort s6, jshort s7, jshort s8, jshort s9, jshort s10) { // We use s1 to drive the output. 
- assert(s2 == 2); - assert(s3 == -3); - assert(s4 == 4); - assert(s5 == -5); - assert(s6 == 6); - assert(s7 == -7); - assert(s8 == 8); - assert(s9 == -9); - assert(s10 == 10); - - assert(0 <= s1); - assert(s1 < static_cast(kShortReturnSize)); + CHECK_EQ(s2, 2); + CHECK_EQ(s3, -3); + CHECK_EQ(s4, 4); + CHECK_EQ(s5, -5); + CHECK_EQ(s6, 6); + CHECK_EQ(s7, -7); + CHECK_EQ(s8, 8); + CHECK_EQ(s9, -9); + CHECK_EQ(s10, 10); + + CHECK_LE(0, s1); + CHECK_LT(s1, static_cast(kShortReturnSize)); return short_returns[s1]; } @@ -231,17 +230,17 @@ extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv*, jclass, jboolean b1 jboolean b5, jboolean b6, jboolean b7, jboolean b8, jboolean b9, jboolean b10) { // We use b1 to drive the output. - assert(b2 == JNI_TRUE); - assert(b3 == JNI_FALSE); - assert(b4 == JNI_TRUE); - assert(b5 == JNI_FALSE); - assert(b6 == JNI_TRUE); - assert(b7 == JNI_FALSE); - assert(b8 == JNI_TRUE); - assert(b9 == JNI_FALSE); - assert(b10 == JNI_TRUE); - - assert(b1 == JNI_TRUE || b1 == JNI_FALSE); + CHECK_EQ(b2, JNI_TRUE); + CHECK_EQ(b3, JNI_FALSE); + CHECK_EQ(b4, JNI_TRUE); + CHECK_EQ(b5, JNI_FALSE); + CHECK_EQ(b6, JNI_TRUE); + CHECK_EQ(b7, JNI_FALSE); + CHECK_EQ(b8, JNI_TRUE); + CHECK_EQ(b9, JNI_FALSE); + CHECK_EQ(b10, JNI_TRUE); + + CHECK(b1 == JNI_TRUE || b1 == JNI_FALSE); return b1; } @@ -252,17 +251,17 @@ extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv*, jclass, jchar c1, jchar c jchar c3, jchar c4, jchar c5, jchar c6, jchar c7, jchar c8, jchar c9, jchar c10) { // We use c1 to drive the output. 
- assert(c2 == 'a'); - assert(c3 == 'b'); - assert(c4 == 'c'); - assert(c5 == '0'); - assert(c6 == '1'); - assert(c7 == '2'); - assert(c8 == 1234); - assert(c9 == 2345); - assert(c10 == 3456); - - assert(c1 < static_cast(kCharReturnSize)); + CHECK_EQ(c2, 'a'); + CHECK_EQ(c3, 'b'); + CHECK_EQ(c4, 'c'); + CHECK_EQ(c5, '0'); + CHECK_EQ(c6, '1'); + CHECK_EQ(c7, '2'); + CHECK_EQ(c8, 1234); + CHECK_EQ(c9, 2345); + CHECK_EQ(c10, 3456); + + CHECK_LT(c1, static_cast(kCharReturnSize)); return char_returns[c1]; } @@ -281,39 +280,39 @@ static void testShallowGetCallingClassLoader(JNIEnv* env) { // Test direct call. { jclass vmstack_clazz = env->FindClass("dalvik/system/VMStack"); - assert(vmstack_clazz != nullptr); - assert(!env->ExceptionCheck()); + CHECK(vmstack_clazz != nullptr); + CHECK(!env->ExceptionCheck()); jmethodID getCallingClassLoaderMethodId = env->GetStaticMethodID(vmstack_clazz, "getCallingClassLoader", "()Ljava/lang/ClassLoader;"); - assert(getCallingClassLoaderMethodId != nullptr); - assert(!env->ExceptionCheck()); + CHECK(getCallingClassLoaderMethodId != nullptr); + CHECK(!env->ExceptionCheck()); jobject class_loader = env->CallStaticObjectMethod(vmstack_clazz, getCallingClassLoaderMethodId); - assert(class_loader == nullptr); - assert(!env->ExceptionCheck()); + CHECK(class_loader == nullptr); + CHECK(!env->ExceptionCheck()); } // Test one-level call. Use System.loadLibrary(). { jclass system_clazz = env->FindClass("java/lang/System"); - assert(system_clazz != nullptr); - assert(!env->ExceptionCheck()); + CHECK(system_clazz != nullptr); + CHECK(!env->ExceptionCheck()); jmethodID loadLibraryMethodId = env->GetStaticMethodID(system_clazz, "loadLibrary", "(Ljava/lang/String;)V"); - assert(loadLibraryMethodId != nullptr); - assert(!env->ExceptionCheck()); + CHECK(loadLibraryMethodId != nullptr); + CHECK(!env->ExceptionCheck()); // Create a string object. 
jobject library_string = env->NewStringUTF("non_existing_library"); - assert(library_string != nullptr); - assert(!env->ExceptionCheck()); + CHECK(library_string != nullptr); + CHECK(!env->ExceptionCheck()); env->CallStaticVoidMethod(system_clazz, loadLibraryMethodId, library_string); - assert(env->ExceptionCheck()); + CHECK(env->ExceptionCheck()); // We expect UnsatisfiedLinkError. jthrowable thrown = env->ExceptionOccurred(); @@ -321,7 +320,7 @@ static void testShallowGetCallingClassLoader(JNIEnv* env) { jclass unsatisfied_link_error_clazz = env->FindClass("java/lang/UnsatisfiedLinkError"); jclass thrown_class = env->GetObjectClass(thrown); - assert(env->IsSameObject(unsatisfied_link_error_clazz, thrown_class)); + CHECK(env->IsSameObject(unsatisfied_link_error_clazz, thrown_class)); } } @@ -333,31 +332,31 @@ extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoad static void testShallowGetStackClass2(JNIEnv* env) { jclass vmstack_clazz = env->FindClass("dalvik/system/VMStack"); - assert(vmstack_clazz != nullptr); - assert(!env->ExceptionCheck()); + CHECK(vmstack_clazz != nullptr); + CHECK(!env->ExceptionCheck()); // Test direct call. { jmethodID getStackClass2MethodId = env->GetStaticMethodID(vmstack_clazz, "getStackClass2", "()Ljava/lang/Class;"); - assert(getStackClass2MethodId != nullptr); - assert(!env->ExceptionCheck()); + CHECK(getStackClass2MethodId != nullptr); + CHECK(!env->ExceptionCheck()); jobject caller_class = env->CallStaticObjectMethod(vmstack_clazz, getStackClass2MethodId); - assert(caller_class == nullptr); - assert(!env->ExceptionCheck()); + CHECK(caller_class == nullptr); + CHECK(!env->ExceptionCheck()); } // Test one-level call. Use VMStack.getStackClass1(). 
{ jmethodID getStackClass1MethodId = env->GetStaticMethodID(vmstack_clazz, "getStackClass1", "()Ljava/lang/Class;"); - assert(getStackClass1MethodId != nullptr); - assert(!env->ExceptionCheck()); + CHECK(getStackClass1MethodId != nullptr); + CHECK(!env->ExceptionCheck()); jobject caller_class = env->CallStaticObjectMethod(vmstack_clazz, getStackClass1MethodId); - assert(caller_class == nullptr); - assert(!env->ExceptionCheck()); + CHECK(caller_class == nullptr); + CHECK(!env->ExceptionCheck()); } // For better testing we would need to compile against libcore and have a two-deep stack @@ -416,8 +415,8 @@ class JniCallNonvirtualVoidMethodTest { env_->ExceptionDescribe(); env_->FatalError(__FUNCTION__); } - assert(!env_->ExceptionCheck()); - assert(c != nullptr); + CHECK(!env_->ExceptionCheck()); + CHECK(c != nullptr); return c; } @@ -429,7 +428,7 @@ class JniCallNonvirtualVoidMethodTest { env_->ExceptionDescribe(); env_->FatalError(__FUNCTION__); } - assert(m != nullptr); + CHECK(m != nullptr); return m; } @@ -439,7 +438,7 @@ class JniCallNonvirtualVoidMethodTest { env_->ExceptionDescribe(); env_->FatalError(__FUNCTION__); } - assert(o != nullptr); + CHECK(o != nullptr); return o; } @@ -467,7 +466,7 @@ class JniCallNonvirtualVoidMethodTest { env_->ExceptionDescribe(); env_->FatalError(__FUNCTION__); } - assert(m != nullptr); + CHECK(m != nullptr); return m; } @@ -508,21 +507,21 @@ class JniCallNonvirtualVoidMethodTest { jobject sub_super = CallConstructor(sub_, super_constructor_); jobject sub_sub = CallConstructor(sub_, sub_constructor_); - assert(env_->IsInstanceOf(super_super, super_)); - assert(!env_->IsInstanceOf(super_super, sub_)); + CHECK(env_->IsInstanceOf(super_super, super_)); + CHECK(!env_->IsInstanceOf(super_super, sub_)); // Note that even though we called (and ran) the subclass // constructor, we are not the subclass. 
- assert(env_->IsInstanceOf(super_sub, super_)); - assert(!env_->IsInstanceOf(super_sub, sub_)); + CHECK(env_->IsInstanceOf(super_sub, super_)); + CHECK(!env_->IsInstanceOf(super_sub, sub_)); // Note that even though we called the superclass constructor, we // are still the subclass. - assert(env_->IsInstanceOf(sub_super, super_)); - assert(env_->IsInstanceOf(sub_super, sub_)); + CHECK(env_->IsInstanceOf(sub_super, super_)); + CHECK(env_->IsInstanceOf(sub_super, sub_)); - assert(env_->IsInstanceOf(sub_sub, super_)); - assert(env_->IsInstanceOf(sub_sub, sub_)); + CHECK(env_->IsInstanceOf(sub_sub, super_)); + CHECK(env_->IsInstanceOf(sub_sub, sub_)); } void TestnonstaticCallNonvirtualMethod(bool super_object, bool super_class, bool super_method, const char* test_case) { @@ -542,8 +541,8 @@ class JniCallNonvirtualVoidMethodTest { CallMethod(o, c, m, true, test_case); jboolean super_field = GetBooleanField(o, super_field_); jboolean sub_field = GetBooleanField(o, sub_field_); - assert(super_field == super_method); - assert(sub_field != super_method); + CHECK_EQ(super_field, super_method); + CHECK_NE(sub_field, super_method); } void TestnonstaticCallNonvirtualMethod() { @@ -565,20 +564,20 @@ extern "C" void JNICALL Java_Main_testCallNonvirtual(JNIEnv* env, jclass) { extern "C" JNIEXPORT void JNICALL Java_Main_testNewStringObject(JNIEnv* env, jclass) { jclass c = env->FindClass("java/lang/String"); - assert(c != nullptr); + CHECK(c != nullptr); jmethodID mid1 = env->GetMethodID(c, "", "()V"); - assert(mid1 != nullptr); - assert(!env->ExceptionCheck()); + CHECK(mid1 != nullptr); + CHECK(!env->ExceptionCheck()); jmethodID mid2 = env->GetMethodID(c, "", "([B)V"); - assert(mid2 != nullptr); - assert(!env->ExceptionCheck()); + CHECK(mid2 != nullptr); + CHECK(!env->ExceptionCheck()); jmethodID mid3 = env->GetMethodID(c, "", "([C)V"); - assert(mid3 != nullptr); - assert(!env->ExceptionCheck()); + CHECK(mid3 != nullptr); + CHECK(!env->ExceptionCheck()); jmethodID mid4 = 
env->GetMethodID(c, "", "(Ljava/lang/String;)V"); - assert(mid4 != nullptr); - assert(!env->ExceptionCheck()); + CHECK(mid4 != nullptr); + CHECK(!env->ExceptionCheck()); const char* test_array = "Test"; int byte_array_length = strlen(test_array); @@ -587,22 +586,22 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testNewStringObject(JNIEnv* env, jcl // Test NewObject jstring s = reinterpret_cast(env->NewObject(c, mid2, byte_array)); - assert(s != nullptr); - assert(env->GetStringLength(s) == byte_array_length); - assert(env->GetStringUTFLength(s) == byte_array_length); + CHECK(s != nullptr); + CHECK_EQ(env->GetStringLength(s), byte_array_length); + CHECK_EQ(env->GetStringUTFLength(s), byte_array_length); const char* chars = env->GetStringUTFChars(s, nullptr); - assert(strcmp(test_array, chars) == 0); + CHECK_EQ(strcmp(test_array, chars), 0); env->ReleaseStringUTFChars(s, chars); // Test AllocObject and Call(Nonvirtual)VoidMethod jstring s1 = reinterpret_cast(env->AllocObject(c)); - assert(s1 != nullptr); + CHECK(s1 != nullptr); jstring s2 = reinterpret_cast(env->AllocObject(c)); - assert(s2 != nullptr); + CHECK(s2 != nullptr); jstring s3 = reinterpret_cast(env->AllocObject(c)); - assert(s3 != nullptr); + CHECK(s3 != nullptr); jstring s4 = reinterpret_cast(env->AllocObject(c)); - assert(s4 != nullptr); + CHECK(s4 != nullptr); jcharArray char_array = env->NewCharArray(5); jstring string_arg = env->NewStringUTF("helloworld"); @@ -621,18 +620,18 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testNewStringObject(JNIEnv* env, jcl // Test with global and weak global references jstring s5 = reinterpret_cast(env->AllocObject(c)); - assert(s5 != nullptr); + CHECK(s5 != nullptr); s5 = reinterpret_cast(env->NewGlobalRef(s5)); jstring s6 = reinterpret_cast(env->AllocObject(c)); - assert(s6 != nullptr); + CHECK(s6 != nullptr); s6 = reinterpret_cast(env->NewWeakGlobalRef(s6)); env->CallVoidMethod(s5, mid1); env->CallNonvirtualVoidMethod(s6, c, mid2, byte_array); - 
assert(env->GetStringLength(s5) == 0); - assert(env->GetStringLength(s6) == byte_array_length); + CHECK_EQ(env->GetStringLength(s5), 0); + CHECK_EQ(env->GetStringLength(s6), byte_array_length); const char* chars6 = env->GetStringUTFChars(s6, nullptr); - assert(strcmp(test_array, chars6) == 0); + CHECK_EQ(strcmp(test_array, chars6), 0); env->ReleaseStringUTFChars(s6, chars6); } @@ -664,8 +663,8 @@ class JniCallDefaultMethodsTest { public: explicit JniCallDefaultMethodsTest(JNIEnv* env) : env_(env), concrete_class_(env_->FindClass("ConcreteClass")) { - assert(!env_->ExceptionCheck()); - assert(concrete_class_ != nullptr); + CHECK(!env_->ExceptionCheck()); + CHECK(concrete_class_ != nullptr); } void Test() { @@ -688,14 +687,14 @@ class JniCallDefaultMethodsTest { void TestCalls(const char* declaring_class, std::vector methods) { jmethodID new_method = env_->GetMethodID(concrete_class_, "", "()V"); jobject obj = env_->NewObject(concrete_class_, new_method); - assert(!env_->ExceptionCheck()); - assert(obj != nullptr); + CHECK(!env_->ExceptionCheck()); + CHECK(obj != nullptr); jclass decl_class = env_->FindClass(declaring_class); - assert(!env_->ExceptionCheck()); - assert(decl_class != nullptr); + CHECK(!env_->ExceptionCheck()); + CHECK(decl_class != nullptr); for (const char* method : methods) { jmethodID method_id = env_->GetMethodID(decl_class, method, "()V"); - assert(!env_->ExceptionCheck()); + CHECK(!env_->ExceptionCheck()); printf("Calling method %s->%s on object of type ConcreteClass\n", declaring_class, method); env_->CallVoidMethod(obj, method_id); if (env_->ExceptionCheck()) { @@ -704,10 +703,10 @@ class JniCallDefaultMethodsTest { jmethodID to_string = env_->GetMethodID( env_->FindClass("java/lang/Object"), "toString", "()Ljava/lang/String;"); jstring exception_string = (jstring) env_->CallObjectMethod(thrown, to_string); - assert(!env_->ExceptionCheck()); + CHECK(!env_->ExceptionCheck()); const char* exception_string_utf8 = 
env_->GetStringUTFChars(exception_string, nullptr); - assert(!env_->ExceptionCheck()); - assert(exception_string_utf8 != nullptr); + CHECK(!env_->ExceptionCheck()); + CHECK(exception_string_utf8 != nullptr); printf("EXCEPTION OCCURED: %s\n", exception_string_utf8); env_->ReleaseStringUTFChars(exception_string, exception_string_utf8); } @@ -724,12 +723,12 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testCallDefaultMethods(JNIEnv* env) static void InvokeSpecificMethod(JNIEnv* env, jobject obj, const char* method) { jclass lambda_class = env->FindClass("LambdaInterface"); - assert(!env->ExceptionCheck()); - assert(lambda_class != nullptr); + CHECK(!env->ExceptionCheck()); + CHECK(lambda_class != nullptr); jmethodID method_id = env->GetMethodID(lambda_class, method, "()V"); - assert(!env->ExceptionCheck()); + CHECK(!env->ExceptionCheck()); env->CallVoidMethod(obj, method_id); - assert(!env->ExceptionCheck()); + CHECK(!env->ExceptionCheck()); } extern "C" JNIEXPORT void JNICALL Java_Main_testInvokeLambdaDefaultMethod( @@ -740,3 +739,6 @@ extern "C" JNIEXPORT void JNICALL Java_Main_testInvokeLambdaDefaultMethod( extern "C" JNIEXPORT void JNICALL Java_Main_testInvokeLambdaMethod(JNIEnv* e, jclass, jobject l) { InvokeSpecificMethod(e, l, "sayHi"); } + +} // namespace art + diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc index 284e5544f..5304590ad 100644 --- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc +++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc @@ -14,6 +14,7 @@ * limitations under the License. */ +#include "art_method-inl.h" #include "check_reference_map_visitor.h" #include "jni.h" diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc index 51bb68f24..420224dd2 100644 --- a/test/004-StackWalk/stack_walk_jni.cc +++ b/test/004-StackWalk/stack_walk_jni.cc @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include "art_method-inl.h" #include "check_reference_map_visitor.h" #include "jni.h" diff --git a/test/044-proxy/expected.txt b/test/044-proxy/expected.txt index be7023e49..2a5f0b90d 100644 --- a/test/044-proxy/expected.txt +++ b/test/044-proxy/expected.txt @@ -95,3 +95,5 @@ Proxy narrowed invocation return type passed 5.8 JNI_OnLoad called callback +Found constructor. +Found constructors with 0 exceptions diff --git a/test/044-proxy/src/ConstructorProxy.java b/test/044-proxy/src/ConstructorProxy.java new file mode 100644 index 000000000..95d150cbb --- /dev/null +++ b/test/044-proxy/src/ConstructorProxy.java @@ -0,0 +1,53 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; + +/** + * Tests proxies when used with constructor methods. 
+ */ +class ConstructorProxy implements InvocationHandler { + public static void main() { + try { + new ConstructorProxy().runTest(); + } catch (Exception e) { + System.out.println("Unexpected failure occured"); + e.printStackTrace(); + } + } + + public void runTest() throws Exception { + Class proxyClass = Proxy.getProxyClass( + getClass().getClassLoader(), + new Class[] { Runnable.class } + ); + Constructor constructor = proxyClass.getConstructor(InvocationHandler.class); + System.out.println("Found constructor."); + // We used to crash when asking the exception types of the constructor, because the runtime was + // not using the non-proxy ArtMethod + Object[] exceptions = constructor.getExceptionTypes(); + System.out.println("Found constructors with " + exceptions.length + " exceptions"); + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + return args[0]; + } +} + diff --git a/test/044-proxy/src/Main.java b/test/044-proxy/src/Main.java index 1f23b95cf..9dadb7c6e 100644 --- a/test/044-proxy/src/Main.java +++ b/test/044-proxy/src/Main.java @@ -31,6 +31,7 @@ public class Main { NarrowingTest.main(null); FloatSelect.main(null); NativeProxy.main(args); + ConstructorProxy.main(); } // The following code maps from the actual proxy class names (eg $Proxy2) to their test output diff --git a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc index 54879fbad..c9110a905 100644 --- a/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc +++ b/test/136-daemon-jni-shutdown/daemon_jni_shutdown.cc @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include #include #include "base/casts.h" @@ -45,6 +46,10 @@ extern "C" JNIEXPORT void JNICALL Java_Main_destroyJavaVMAndExit(JNIEnv* env, jc self->SetTopOfShadowStack(nullptr); JavaVM* vm = down_cast(env)->vm; vm->DetachCurrentThread(); + // Open ourself again to make sure the native library does not get unloaded from + // underneath us due to DestroyJavaVM. b/28406866 + void* handle = dlopen(kIsDebugBuild ? "libarttestd.so" : "libarttest.so", RTLD_NOW); + CHECK(handle != nullptr); vm->DestroyJavaVM(); vm_was_shutdown.store(true); // Give threads some time to get stuck in ExceptionCheck. diff --git a/test/138-duplicate-classes-check/src/FancyLoader.java b/test/138-duplicate-classes-check/src/FancyLoader.java deleted file mode 100644 index 03ec94876..000000000 --- a/test/138-duplicate-classes-check/src/FancyLoader.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright (C) 2008 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.RandomAccessFile; -import java.lang.reflect.Constructor; -import java.lang.reflect.Method; -import java.lang.reflect.InvocationTargetException; - -/** - * A class loader with atypical behavior: we try to load a private - * class implementation before asking the system or boot loader. This - * is used to create multiple classes with identical names in a single VM. 
- * - * If DexFile is available, we use that; if not, we assume we're not in - * Dalvik and instantiate the class with defineClass(). - * - * The location of the DEX files and class data is dependent upon the - * test framework. - */ -public class FancyLoader extends ClassLoader { - /* this is where the "alternate" .class files live */ - static final String CLASS_PATH = "classes-ex/"; - - /* this is the "alternate" DEX/Jar file */ - static final String DEX_FILE = System.getenv("DEX_LOCATION") + - "/138-duplicate-classes-check-ex.jar"; - - /* on Dalvik, this is a DexFile; otherwise, it's null */ - private Class mDexClass; - - private Object mDexFile; - - /** - * Construct FancyLoader, grabbing a reference to the DexFile class - * if we're running under Dalvik. - */ - public FancyLoader(ClassLoader parent) { - super(parent); - - try { - mDexClass = parent.loadClass("dalvik.system.DexFile"); - } catch (ClassNotFoundException cnfe) { - // ignore -- not running Dalvik - } - } - - /** - * Finds the class with the specified binary name. - * - * We search for a file in CLASS_PATH or pull an entry from DEX_FILE. - * If we don't find a match, we throw an exception. - */ - protected Class findClass(String name) throws ClassNotFoundException - { - if (mDexClass != null) { - return findClassDalvik(name); - } else { - return findClassNonDalvik(name); - } - } - - /** - * Finds the class with the specified binary name, from a DEX file. - */ - private Class findClassDalvik(String name) - throws ClassNotFoundException { - - if (mDexFile == null) { - synchronized (FancyLoader.class) { - Constructor ctor; - /* - * Construct a DexFile object through reflection. 
- */ - try { - ctor = mDexClass.getConstructor(new Class[] {String.class}); - } catch (NoSuchMethodException nsme) { - throw new ClassNotFoundException("getConstructor failed", - nsme); - } - - try { - mDexFile = ctor.newInstance(DEX_FILE); - } catch (InstantiationException ie) { - throw new ClassNotFoundException("newInstance failed", ie); - } catch (IllegalAccessException iae) { - throw new ClassNotFoundException("newInstance failed", iae); - } catch (InvocationTargetException ite) { - throw new ClassNotFoundException("newInstance failed", ite); - } - } - } - - /* - * Call DexFile.loadClass(String, ClassLoader). - */ - Method meth; - - try { - meth = mDexClass.getMethod("loadClass", - new Class[] { String.class, ClassLoader.class }); - } catch (NoSuchMethodException nsme) { - throw new ClassNotFoundException("getMethod failed", nsme); - } - - try { - meth.invoke(mDexFile, name, this); - } catch (IllegalAccessException iae) { - throw new ClassNotFoundException("loadClass failed", iae); - } catch (InvocationTargetException ite) { - throw new ClassNotFoundException("loadClass failed", - ite.getCause()); - } - - return null; - } - - /** - * Finds the class with the specified binary name, from .class files. 
- */ - private Class findClassNonDalvik(String name) - throws ClassNotFoundException { - - String pathName = CLASS_PATH + name + ".class"; - //System.out.println("--- Fancy: looking for " + pathName); - - File path = new File(pathName); - RandomAccessFile raf; - - try { - raf = new RandomAccessFile(path, "r"); - } catch (FileNotFoundException fnfe) { - throw new ClassNotFoundException("Not found: " + pathName); - } - - /* read the entire file in */ - byte[] fileData; - try { - fileData = new byte[(int) raf.length()]; - raf.readFully(fileData); - } catch (IOException ioe) { - throw new ClassNotFoundException("Read error: " + pathName); - } finally { - try { - raf.close(); - } catch (IOException ioe) { - // drop - } - } - - /* create the class */ - //System.out.println("--- Fancy: defining " + name); - try { - return defineClass(name, fileData, 0, fileData.length); - } catch (Throwable th) { - throw new ClassNotFoundException("defineClass failed", th); - } - } - - /** - * Load a class. - * - * Normally a class loader wouldn't override this, but we want our - * version of the class to take precedence over an already-loaded - * version. - * - * We still want the system classes (e.g. java.lang.Object) from the - * bootstrap class loader. - */ - protected Class loadClass(String name, boolean resolve) - throws ClassNotFoundException - { - Class res; - - /* - * 1. Invoke findLoadedClass(String) to check if the class has - * already been loaded. - * - * This doesn't change. - */ - res = findLoadedClass(name); - if (res != null) { - System.out.println("FancyLoader.loadClass: " - + name + " already loaded"); - if (resolve) - resolveClass(res); - return res; - } - - /* - * 3. Invoke the findClass(String) method to find the class. - */ - try { - res = findClass(name); - if (resolve) - resolveClass(res); - } - catch (ClassNotFoundException e) { - // we couldn't find it, so eat the exception and keep going - } - - /* - * 2. Invoke the loadClass method on the parent class loader. 
If - * the parent loader is null the class loader built-in to the - * virtual machine is used, instead. - * - * (Since we're not in java.lang, we can't actually invoke the - * parent's loadClass() method, but we passed our parent to the - * super-class which can take care of it for us.) - */ - res = super.loadClass(name, resolve); // returns class or throws - return res; - } -} diff --git a/test/138-duplicate-classes-check/src/Main.java b/test/138-duplicate-classes-check/src/Main.java index a9b5bb04e..a2ef28193 100644 --- a/test/138-duplicate-classes-check/src/Main.java +++ b/test/138-duplicate-classes-check/src/Main.java @@ -14,6 +14,7 @@ * limitations under the License. */ +import dalvik.system.DexClassLoader; import java.io.File; import java.lang.reflect.Method; @@ -30,7 +31,11 @@ public class Main { // Now run the class from the -ex file. - FancyLoader loader = new FancyLoader(getClass().getClassLoader()); + String dexPath = System.getenv("DEX_LOCATION") + "/138-duplicate-classes-check-ex.jar"; + String optimizedDirectory = System.getenv("DEX_LOCATION"); + String librarySearchPath = null; + DexClassLoader loader = new DexClassLoader(dexPath, optimizedDirectory, librarySearchPath, + getClass().getClassLoader()); try { Class testEx = loader.loadClass("TestEx"); diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java index 15683b0b1..17a6049db 100644 --- a/test/141-class-unload/src/Main.java +++ b/test/141-class-unload/src/Main.java @@ -23,6 +23,7 @@ import java.lang.reflect.Method; public class Main { static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/141-class-unload-ex.jar"; + static final String LIBRARY_SEARCH_PATH = System.getProperty("java.library.path"); static String nativeLibraryName; public static void main(String[] args) throws Exception { @@ -32,7 +33,7 @@ public class Main { throw new AssertionError("Couldn't find path class loader class"); } Constructor constructor = - 
pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class); + pathClassLoader.getDeclaredConstructor(String.class, String.class, ClassLoader.class); try { testUnloadClass(constructor); testUnloadLoader(constructor); @@ -49,7 +50,7 @@ public class Main { // Test that the oat files are unloaded. testOatFilesUnloaded(getPid()); } catch (Exception e) { - System.out.println(e); + e.printStackTrace(); } } @@ -118,7 +119,7 @@ public class Main { private static void testNoUnloadInvoke(Constructor constructor) throws Exception { WeakReference loader = new WeakReference((ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader())); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader())); WeakReference intHolder = new WeakReference(loader.get().loadClass("IntHolder")); intHolder.get().getDeclaredMethod("runGC").invoke(intHolder.get()); boolean isNull = loader.get() == null; @@ -128,7 +129,7 @@ public class Main { private static void testNoUnloadInstance(Constructor constructor) throws Exception { WeakReference loader = new WeakReference((ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader())); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader())); WeakReference intHolder = new WeakReference(loader.get().loadClass("IntHolder")); Object o = intHolder.get().newInstance(); Runtime.getRuntime().gc(); @@ -138,7 +139,7 @@ public class Main { private static WeakReference setUpUnloadClass(Constructor constructor) throws Exception { ClassLoader loader = (ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader()); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader()); Class intHolder = loader.loadClass("IntHolder"); Method getValue = intHolder.getDeclaredMethod("getValue"); Method setValue = intHolder.getDeclaredMethod("setValue", Integer.TYPE); @@ -155,7 +156,7 @@ public class Main { boolean waitForCompilation) throws Exception { ClassLoader 
loader = (ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader()); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader()); Class intHolder = loader.loadClass("IntHolder"); Method setValue = intHolder.getDeclaredMethod("setValue", Integer.TYPE); setValue.invoke(intHolder, 2); @@ -177,7 +178,7 @@ public class Main { private static WeakReference setUpLoadLibrary(Constructor constructor) throws Exception { ClassLoader loader = (ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader()); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader()); Class intHolder = loader.loadClass("IntHolder"); Method loadLibrary = intHolder.getDeclaredMethod("loadLibrary", String.class); loadLibrary.invoke(intHolder, nativeLibraryName); diff --git a/test/148-multithread-gc-annotations/check b/test/148-multithread-gc-annotations/check new file mode 100755 index 000000000..842bdc6ae --- /dev/null +++ b/test/148-multithread-gc-annotations/check @@ -0,0 +1,22 @@ +#!/bin/bash +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Check that the string "error" isn't present +if grep error "$2"; then + exit 1 +else + exit 0 +fi diff --git a/test/148-multithread-gc-annotations/expected.txt b/test/148-multithread-gc-annotations/expected.txt new file mode 100644 index 000000000..e69de29bb diff --git a/test/148-multithread-gc-annotations/gc_coverage.cc b/test/148-multithread-gc-annotations/gc_coverage.cc new file mode 100644 index 000000000..263eefd3a --- /dev/null +++ b/test/148-multithread-gc-annotations/gc_coverage.cc @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "gc/heap.h" +#include "jni.h" +#include "runtime.h" +#include "scoped_thread_state_change.h" +#include "thread-inl.h" + +namespace art { +namespace { + +extern "C" JNIEXPORT jboolean JNICALL Java_MovingGCThread_performHomogeneousSpaceCompact(JNIEnv*, jclass) { + return Runtime::Current()->GetHeap()->PerformHomogeneousSpaceCompact() == gc::kSuccess ? + JNI_TRUE : JNI_FALSE; +} + +extern "C" JNIEXPORT jboolean JNICALL Java_MovingGCThread_supportHomogeneousSpaceCompact(JNIEnv*, jclass) { + return Runtime::Current()->GetHeap()->SupportHomogeneousSpaceCompactAndCollectorTransitions() ? 
+ JNI_TRUE : JNI_FALSE; +} + +extern "C" JNIEXPORT jlong JNICALL Java_MovingGCThread_objectAddress(JNIEnv* env, jclass, jobject object) { + ScopedObjectAccess soa(env); + return reinterpret_cast(soa.Decode(object)); +} + +} // namespace +} // namespace art diff --git a/test/148-multithread-gc-annotations/info.txt b/test/148-multithread-gc-annotations/info.txt new file mode 100644 index 000000000..c62e544e0 --- /dev/null +++ b/test/148-multithread-gc-annotations/info.txt @@ -0,0 +1 @@ +Tests that getting annotations works during moving gc. diff --git a/test/148-multithread-gc-annotations/src/AnnoClass1.java b/test/148-multithread-gc-annotations/src/AnnoClass1.java new file mode 100644 index 000000000..b82c61fd5 --- /dev/null +++ b/test/148-multithread-gc-annotations/src/AnnoClass1.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.lang.annotation.*; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface AnnoClass1 { + Class value(); +} diff --git a/test/148-multithread-gc-annotations/src/AnnoClass2.java b/test/148-multithread-gc-annotations/src/AnnoClass2.java new file mode 100644 index 000000000..c75d950e2 --- /dev/null +++ b/test/148-multithread-gc-annotations/src/AnnoClass2.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.annotation.*; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface AnnoClass2 { + Class value(); +} diff --git a/test/148-multithread-gc-annotations/src/AnnoClass3.java b/test/148-multithread-gc-annotations/src/AnnoClass3.java new file mode 100644 index 000000000..5b4a37809 --- /dev/null +++ b/test/148-multithread-gc-annotations/src/AnnoClass3.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.annotation.*; + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface AnnoClass3 { + Class value(); +} diff --git a/test/148-multithread-gc-annotations/src/AnnotationThread.java b/test/148-multithread-gc-annotations/src/AnnotationThread.java new file mode 100644 index 000000000..ebc14e96b --- /dev/null +++ b/test/148-multithread-gc-annotations/src/AnnotationThread.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.lang.annotation.*; + +@AnnoClass1(AnnoClass2.class) +@AnnoClass2(AnnoClass3.class) +@AnnoClass3(AnnoClass1.class) +public class AnnotationThread implements Runnable { + public void run() { + for (int i = 0; i < 20; i++) { + Annotation[] annotations = AnnotationThread.class.getAnnotations(); + if (annotations == null) { + System.out.println("error: AnnotationThread class has no annotations"); + return; + } + } + } +} diff --git a/test/148-multithread-gc-annotations/src/Main.java b/test/148-multithread-gc-annotations/src/Main.java new file mode 100644 index 000000000..b652ed651 --- /dev/null +++ b/test/148-multithread-gc-annotations/src/Main.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class Main { + public static void main(String[] args) { + System.loadLibrary(args[0]); + Thread annoThread = new Thread(new AnnotationThread(), "Annotation thread"); + Thread gcThread = new Thread(new MovingGCThread(), "Moving GC thread"); + annoThread.start(); + gcThread.start(); + try { + annoThread.join(); + gcThread.join(); + } catch (InterruptedException e) { + System.out.println("error: " + e); + } + System.out.println("Done."); + } +} diff --git a/test/148-multithread-gc-annotations/src/MovingGCThread.java b/test/148-multithread-gc-annotations/src/MovingGCThread.java new file mode 100644 index 000000000..87de9f4ec --- /dev/null +++ b/test/148-multithread-gc-annotations/src/MovingGCThread.java @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.util.TreeMap; + +public class MovingGCThread implements Runnable { + private static TreeMap treeMap = new TreeMap(); + + public void run() { + for (int i = 0; i < 20; i++) { + testHomogeneousCompaction(); + } + } + + public static void testHomogeneousCompaction() { + final boolean supportHSC = supportHomogeneousSpaceCompact(); + if (!supportHSC) { + return; + } + Object o = new Object(); + long addressBefore = objectAddress(o); + allocateStuff(); + final boolean success = performHomogeneousSpaceCompact(); + allocateStuff(); + if (!success) { + System.out.println("error: Expected " + supportHSC + " but got " + success); + } + allocateStuff(); + long addressAfter = objectAddress(o); + // This relies on the compaction copying from one space to another space and there being + // no overlap. + if (addressBefore == addressAfter) { + System.out.println("error: Expected different adddress " + addressBefore + " vs " + + addressAfter); + } + } + + private static void allocateStuff() { + for (int i = 0; i < 1000; ++i) { + Object o = new Object(); + treeMap.put(o.hashCode(), o); + } + } + + // Methods to get access to ART internals. 
+ private static native boolean supportHomogeneousSpaceCompact(); + private static native boolean performHomogeneousSpaceCompact(); + private static native long objectAddress(Object object); +} diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java index 66e1d92cc..41771b52c 100644 --- a/test/449-checker-bce/src/Main.java +++ b/test/449-checker-bce/src/Main.java @@ -927,6 +927,32 @@ public class Main { } } + /// CHECK-START: void Main.nonzeroLength(int[]) BCE (before) + /// CHECK-DAG: BoundsCheck + // + /// CHECK-START: void Main.nonzeroLength(int[]) BCE (after) + /// CHECK-NOT: BoundsCheck + /// CHECK-NOT: Deoptimize + public static void nonzeroLength(int[] a) { + if (a.length != 0) { + a[0] = 112; + } + } + + /// CHECK-START: void Main.knownLength(int[]) BCE (before) + /// CHECK-DAG: BoundsCheck + /// CHECK-DAG: BoundsCheck + // + /// CHECK-START: void Main.knownLength(int[]) BCE (after) + /// CHECK-NOT: BoundsCheck + /// CHECK-NOT: Deoptimize + public static void knownLength(int[] a) { + if (a.length == 2) { + a[0] = -1; + a[1] = -2; + } + } + static int[][] mA; /// CHECK-START: void Main.dynamicBCEAndIntrinsic(int) BCE (before) @@ -1586,6 +1612,26 @@ public class Main { } } + nonzeroLength(array); + if (array[0] != 112) { + System.out.println("nonzero length failed!"); + } + + knownLength(array); + if (array[0] != 112 || array[1] != 1) { + System.out.println("nonzero length failed!"); + } + array = new int[2]; + knownLength(array); + if (array[0] != -1 || array[1] != -2) { + System.out.println("nonzero length failed!"); + } + + // Zero length array does not break. 
+ array = new int[0]; + nonzeroLength(array); + knownLength(array); + mA = new int[4][4]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { diff --git a/test/530-checker-loops2/src/Main.java b/test/530-checker-loops2/src/Main.java index c644692f0..b12fbd609 100644 --- a/test/530-checker-loops2/src/Main.java +++ b/test/530-checker-loops2/src/Main.java @@ -710,8 +710,8 @@ public class Main { // making them a candidate for deoptimization based on constant indices. // Compiler should ensure the array loads are not subsequently hoisted // "above" the deoptimization "barrier" on the bounds. - a[0][i] = 1; - a[1][i] = 2; + a[1][i] = 1; + a[2][i] = 2; a[99][i] = 3; } } @@ -1042,11 +1042,11 @@ public class Main { a = new int[100][10]; expectEquals(55, dynamicBCEAndConstantIndices(x, a, 0, 10)); for (int i = 0; i < 10; i++) { - expectEquals((i % 10) != 0 ? 1 : 0, a[0][i]); - expectEquals((i % 10) != 0 ? 2 : 0, a[1][i]); + expectEquals((i % 10) != 0 ? 1 : 0, a[1][i]); + expectEquals((i % 10) != 0 ? 2 : 0, a[2][i]); expectEquals((i % 10) != 0 ? 3 : 0, a[99][i]); } - a = new int[2][10]; + a = new int[3][10]; sResult = 0; try { expectEquals(55, dynamicBCEAndConstantIndices(x, a, 0, 10)); @@ -1054,8 +1054,8 @@ public class Main { sResult = 1; } expectEquals(1, sResult); - expectEquals(a[0][1], 1); - expectEquals(a[1][1], 2); + expectEquals(a[1][1], 1); + expectEquals(a[2][1], 2); // Dynamic BCE combined with constant indices of all types. 
boolean[] x1 = { true }; diff --git a/test/536-checker-intrinsic-optimization/src/Main.java b/test/536-checker-intrinsic-optimization/src/Main.java index be666e94f..15a9504ac 100644 --- a/test/536-checker-intrinsic-optimization/src/Main.java +++ b/test/536-checker-intrinsic-optimization/src/Main.java @@ -16,9 +16,69 @@ public class Main { + public static boolean doThrow = false; + + public static void assertIntEquals(int expected, int result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + public static void assertBooleanEquals(boolean expected, boolean result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + public static void main(String[] args) { stringEqualsSame(); stringArgumentNotNull("Foo"); + + assertIntEquals(0, $opt$noinline$getStringLength("")); + assertIntEquals(3, $opt$noinline$getStringLength("abc")); + assertIntEquals(10, $opt$noinline$getStringLength("0123456789")); + + assertBooleanEquals(true, $opt$noinline$isStringEmpty("")); + assertBooleanEquals(false, $opt$noinline$isStringEmpty("abc")); + assertBooleanEquals(false, $opt$noinline$isStringEmpty("0123456789")); + } + + /// CHECK-START: int Main.$opt$noinline$getStringLength(java.lang.String) instruction_simplifier (before) + /// CHECK-DAG: <> InvokeVirtual intrinsic:StringLength + /// CHECK-DAG: Return [<>] + + /// CHECK-START: int Main.$opt$noinline$getStringLength(java.lang.String) instruction_simplifier (after) + /// CHECK-DAG: <> ParameterValue + /// CHECK-DAG: <> NullCheck [<>] + /// CHECK-DAG: <> ArrayLength [<>] is_string_length:true + /// CHECK-DAG: Return [<>] + + /// CHECK-START: int Main.$opt$noinline$getStringLength(java.lang.String) instruction_simplifier (after) + /// CHECK-NOT: InvokeVirtual intrinsic:StringLength + + static public int $opt$noinline$getStringLength(String s) { + if (doThrow) { throw new Error(); } + return s.length(); + } + + /// CHECK-START: 
boolean Main.$opt$noinline$isStringEmpty(java.lang.String) instruction_simplifier (before) + /// CHECK-DAG: <> InvokeVirtual intrinsic:StringIsEmpty + /// CHECK-DAG: Return [<>] + + /// CHECK-START: boolean Main.$opt$noinline$isStringEmpty(java.lang.String) instruction_simplifier (after) + /// CHECK-DAG: <> ParameterValue + /// CHECK-DAG: <> IntConstant 0 + /// CHECK-DAG: <> NullCheck [<>] + /// CHECK-DAG: <> ArrayLength [<>] is_string_length:true + /// CHECK-DAG: <> Equal [<>,<>] + /// CHECK-DAG: Return [<>] + + /// CHECK-START: boolean Main.$opt$noinline$isStringEmpty(java.lang.String) instruction_simplifier (after) + /// CHECK-NOT: InvokeVirtual intrinsic:StringIsEmpty + + static public boolean $opt$noinline$isStringEmpty(String s) { + if (doThrow) { throw new Error(); } + return s.isEmpty(); } /// CHECK-START: boolean Main.stringEqualsSame() instruction_simplifier (before) diff --git a/test/551-checker-shifter-operand/src/Main.java b/test/551-checker-shifter-operand/src/Main.java index edb8a68b4..a4561b83d 100644 --- a/test/551-checker-shifter-operand/src/Main.java +++ b/test/551-checker-shifter-operand/src/Main.java @@ -500,9 +500,9 @@ public class Main { assertIntEquals(a + $noinline$IntShl(b, 16), a + (b << 16)); assertIntEquals(a + $noinline$IntShl(b, 30), a + (b << 30)); assertIntEquals(a + $noinline$IntShl(b, 31), a + (b << 31)); - assertIntEquals(a + $noinline$IntShl(b, 32), a + (b << 32)); - assertIntEquals(a + $noinline$IntShl(b, 62), a + (b << 62)); - assertIntEquals(a + $noinline$IntShl(b, 63), a + (b << 63)); + assertIntEquals(a + $noinline$IntShl(b, 32), a + (b << $opt$inline$IntConstant32())); + assertIntEquals(a + $noinline$IntShl(b, 62), a + (b << $opt$inline$IntConstant62())); + assertIntEquals(a + $noinline$IntShl(b, 63), a + (b << $opt$inline$IntConstant63())); assertIntEquals(a - $noinline$IntShr(b, 1), a - (b >> 1)); assertIntEquals(a - $noinline$IntShr(b, 6), a - (b >> 6)); @@ -513,9 +513,9 @@ public class Main { assertIntEquals(a - 
$noinline$IntShr(b, 16), a - (b >> 16)); assertIntEquals(a - $noinline$IntShr(b, 30), a - (b >> 30)); assertIntEquals(a - $noinline$IntShr(b, 31), a - (b >> 31)); - assertIntEquals(a - $noinline$IntShr(b, 32), a - (b >> 32)); - assertIntEquals(a - $noinline$IntShr(b, 62), a - (b >> 62)); - assertIntEquals(a - $noinline$IntShr(b, 63), a - (b >> 63)); + assertIntEquals(a - $noinline$IntShr(b, 32), a - (b >> $opt$inline$IntConstant32())); + assertIntEquals(a - $noinline$IntShr(b, 62), a - (b >> $opt$inline$IntConstant62())); + assertIntEquals(a - $noinline$IntShr(b, 63), a - (b >> $opt$inline$IntConstant63())); assertIntEquals(a ^ $noinline$IntUshr(b, 1), a ^ (b >>> 1)); assertIntEquals(a ^ $noinline$IntUshr(b, 6), a ^ (b >>> 6)); @@ -526,11 +526,17 @@ public class Main { assertIntEquals(a ^ $noinline$IntUshr(b, 16), a ^ (b >>> 16)); assertIntEquals(a ^ $noinline$IntUshr(b, 30), a ^ (b >>> 30)); assertIntEquals(a ^ $noinline$IntUshr(b, 31), a ^ (b >>> 31)); - assertIntEquals(a ^ $noinline$IntUshr(b, 32), a ^ (b >>> 32)); - assertIntEquals(a ^ $noinline$IntUshr(b, 62), a ^ (b >>> 62)); - assertIntEquals(a ^ $noinline$IntUshr(b, 63), a ^ (b >>> 63)); + assertIntEquals(a ^ $noinline$IntUshr(b, 32), a ^ (b >>> $opt$inline$IntConstant32())); + assertIntEquals(a ^ $noinline$IntUshr(b, 62), a ^ (b >>> $opt$inline$IntConstant62())); + assertIntEquals(a ^ $noinline$IntUshr(b, 63), a ^ (b >>> $opt$inline$IntConstant63())); } + // Hiding constants outside the range [0, 32) used for int shifts from Jack. + // (Jack extracts only the low 5 bits.) 
+ public static int $opt$inline$IntConstant32() { return 32; } + public static int $opt$inline$IntConstant62() { return 62; } + public static int $opt$inline$IntConstant63() { return 63; } + static long $noinline$LongShl(long b, long c) { if (doThrow) throw new Error(); diff --git a/test/557-checker-instruction-simplifier-ror/src/Main.java b/test/557-checker-instruction-simplifier-ror/src/Main.java index 310611bba..6d8b74d1e 100644 --- a/test/557-checker-instruction-simplifier-ror/src/Main.java +++ b/test/557-checker-instruction-simplifier-ror/src/Main.java @@ -175,28 +175,32 @@ public class Main { // (i >>> #distance) | (i << #-distance) - /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier (before) + /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier_after_bce (before) /// CHECK: <> ParameterValue /// CHECK: <> IntConstant 2 - /// CHECK: <> IntConstant {{-2|30}} + /// CHECK: <> IntConstant -2 /// CHECK-DAG: <> UShr [<>,<>] /// CHECK-DAG: <> Shl [<>,<>] /// CHECK: <> Or [<>,<>] /// CHECK: Return [<>] - /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier (after) + /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier_after_bce (after) /// CHECK: <> ParameterValue /// CHECK: <> IntConstant 2 /// CHECK: <> Ror [<>,<>] /// CHECK: Return [<>] - /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier (after) + /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier_after_bce (after) /// CHECK-NOT: UShr /// CHECK-NOT: Shl public static int ror_int_constant_c_negc(int value) { - return (value >>> 2) | (value << -2); + return (value >>> 2) | (value << $opt$inline$IntConstantM2()); } + // Hiding constants outside the range [0, 32) used for int shifts from Jack. + // (Jack extracts only the low 5 bits.) 
+ public static int $opt$inline$IntConstantM2() { return -2; } + // (j >>> #distance) | (j << #-distance) /// CHECK-START: long Main.ror_long_constant_c_negc(long) instruction_simplifier (before) diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc index 167a5757f..7b2c6cbcd 100644 --- a/test/566-polymorphic-inlining/polymorphic_inline.cc +++ b/test/566-polymorphic-inlining/polymorphic_inline.cc @@ -60,6 +60,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_ensureJittedAndPolymorphicInline(JNI do_checks(cls, "testInvokeVirtual"); do_checks(cls, "testInvokeInterface"); + do_checks(cls, "$noinline$testInlineToSameTarget"); } } // namespace art diff --git a/test/566-polymorphic-inlining/src/Main.java b/test/566-polymorphic-inlining/src/Main.java index 7283e8622..a59ce5b34 100644 --- a/test/566-polymorphic-inlining/src/Main.java +++ b/test/566-polymorphic-inlining/src/Main.java @@ -25,6 +25,12 @@ public class Main implements Itf { } } + public static void assertEquals(int expected, int actual) { + if (expected != actual) { + throw new Error("Expected " + expected + ", got " + actual); + } + } + public static void main(String[] args) throws Exception { System.loadLibrary(args[0]); Main[] mains = new Main[3]; @@ -41,6 +47,8 @@ public class Main implements Itf { testInvokeVirtual(mains[1]); testInvokeInterface(itfs[0]); testInvokeInterface(itfs[1]); + $noinline$testInlineToSameTarget(mains[0]); + $noinline$testInlineToSameTarget(mains[1]); } ensureJittedAndPolymorphicInline(); @@ -56,6 +64,10 @@ public class Main implements Itf { // This will trigger a deoptimization of the compiled code. assertEquals(OtherSubclass.class, testInvokeVirtual(mains[2])); assertEquals(OtherSubclass.class, testInvokeInterface(itfs[2])); + + // Run this once to make sure we execute the JITted code. 
+ $noinline$testInlineToSameTarget(mains[0]); + assertEquals(20001, counter); } public Class sameInvokeVirtual() { @@ -76,9 +88,21 @@ public class Main implements Itf { return m.sameInvokeVirtual(); } + public static void $noinline$testInlineToSameTarget(Main m) { + if (doThrow) throw new Error(""); + m.increment(); + } + public Object field = new Object(); public static native void ensureJittedAndPolymorphicInline(); + + public void increment() { + field.getClass(); // null check to ensure we get an inlined frame in the CodeInfo + counter++; + } + public static int counter = 0; + public static boolean doThrow = false; } class Subclass extends Main { diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc index 2a5b2c954..2fa5800e5 100644 --- a/test/570-checker-osr/osr.cc +++ b/test/570-checker-osr/osr.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "art_method.h" +#include "art_method-inl.h" #include "jit/jit.h" #include "jit/jit_code_cache.h" #include "jit/profiling_info.h" @@ -75,7 +75,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInOsrCode(JNIEnv* env, extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInInterpreter(JNIEnv* env, jclass, jstring method_name) { - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { // The return value is irrelevant if we're not using JIT. 
return false; } @@ -111,7 +111,7 @@ class ProfilingInfoVisitor : public StackVisitor { extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasProfilingInfo(JNIEnv* env, jclass, jstring method_name) { - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { return; } ScopedUtfChars chars(env, method_name); @@ -151,7 +151,7 @@ class OsrCheckVisitor : public StackVisitor { extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv* env, jclass, jstring method_name) { - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { return; } ScopedUtfChars chars(env, method_name); diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java index 200b54a44..15c232d6a 100644 --- a/test/570-checker-osr/src/Main.java +++ b/test/570-checker-osr/src/Main.java @@ -61,6 +61,18 @@ public class Main { throw new Error("Unexpected return value"); } + $noinline$inlineCache2(new Main(), /* isSecondInvocation */ false); + if ($noinline$inlineCache2(new SubMain(), /* isSecondInvocation */ true) != SubMain.class) { + throw new Error("Unexpected return value"); + } + + // Test polymorphic inline cache to the same target (inlineCache3). + $noinline$inlineCache3(new Main(), /* isSecondInvocation */ false); + $noinline$inlineCache3(new SubMain(), /* isSecondInvocation */ false); + if ($noinline$inlineCache3(new SubMain(), /* isSecondInvocation */ true) != null) { + throw new Error("Unexpected return value"); + } + $noinline$stackOverflow(new Main(), /* isSecondInvocation */ false); $noinline$stackOverflow(new SubMain(), /* isSecondInvocation */ true); @@ -147,10 +159,76 @@ public class Main { return other.returnClass(); } + public static Class $noinline$inlineCache2(Main m, boolean isSecondInvocation) { + // If we are running in non-JIT mode, or were unlucky enough to get this method + // already JITted, just return the expected value. 
+ if (!isInInterpreter("$noinline$inlineCache2")) { + return SubMain.class; + } + + ensureHasProfilingInfo("$noinline$inlineCache2"); + + // Ensure that we have OSR code to jump to. + if (isSecondInvocation) { + ensureHasOsrCode("$noinline$inlineCache2"); + } + + // This call will be optimized in the OSR compiled code + // to check and deoptimize if m is not of type 'Main'. + Main other = m.inlineCache2(); + + // Jump to OSR compiled code. The second run + // of this method will have 'm' as a SubMain, and the compiled + // code we are jumping to will have wrongly optimize other as being null. + if (isSecondInvocation) { + while (!isInOsrCode("$noinline$inlineCache2")) {} + } + + // We used to wrongly optimize this code and assume 'other' was always null. + return (other == null) ? null : other.returnClass(); + } + + public static Class $noinline$inlineCache3(Main m, boolean isSecondInvocation) { + // If we are running in non-JIT mode, or were unlucky enough to get this method + // already JITted, just return the expected value. + if (!isInInterpreter("$noinline$inlineCache3")) { + return null; + } + + ensureHasProfilingInfo("$noinline$inlineCache3"); + + // Ensure that we have OSR code to jump to. + if (isSecondInvocation) { + ensureHasOsrCode("$noinline$inlineCache3"); + } + + // This call will be optimized in the OSR compiled code + // to check and deoptimize if m is not of type 'Main'. + Main other = m.inlineCache3(); + + // Jump to OSR compiled code. The second run + // of this method will have 'm' as a SubMain, and the compiled + // code we are jumping to will have wrongly optimize other as being null. + if (isSecondInvocation) { + while (!isInOsrCode("$noinline$inlineCache3")) {} + } + + // We used to wrongly optimize this code and assume 'other' was always null. + return (other == null) ? 
null : other.returnClass(); + } + public Main inlineCache() { return new Main(); } + public Main inlineCache2() { + return null; + } + + public Main inlineCache3() { + return null; + } + public Class returnClass() { return Main.class; } @@ -235,6 +313,10 @@ class SubMain extends Main { return new SubMain(); } + public Main inlineCache2() { + return new SubMain(); + } + public void otherInlineCache() { return; } diff --git a/test/586-checker-null-array-get/src/Main.java b/test/586-checker-null-array-get/src/Main.java index 332cfb0a5..e0782bc84 100644 --- a/test/586-checker-null-array-get/src/Main.java +++ b/test/586-checker-null-array-get/src/Main.java @@ -14,10 +14,20 @@ * limitations under the License. */ +class Test1 { + int[] iarr; +} + +class Test2 { + float[] farr; +} + public class Main { public static Object[] getObjectArray() { return null; } public static long[] getLongArray() { return null; } public static Object getNull() { return null; } + public static Test1 getNullTest1() { return null; } + public static Test2 getNullTest2() { return null; } public static void main(String[] args) { try { @@ -26,13 +36,25 @@ public class Main { } catch (NullPointerException e) { // Expected. } + try { + bar(); + throw new Error("Expected NullPointerException"); + } catch (NullPointerException e) { + // Expected. + } + try { + test1(); + throw new Error("Expected NullPointerException"); + } catch (NullPointerException e) { + // Expected. 
+ } } /// CHECK-START: void Main.foo() load_store_elimination (after) - /// CHECK-DAG: <> NullConstant - /// CHECK-DAG: <> NullCheck [<>] - /// CHECK-DAG: <> ArrayGet [<>,{{i\d+}}] - /// CHECK-DAG: <> ArrayGet [<>,{{i\d+}}] + /// CHECK-DAG: <> NullConstant + /// CHECK-DAG: <> NullCheck [<>] + /// CHECK-DAG: <> ArrayGet [<>,{{i\d+}}] + /// CHECK-DAG: <> ArrayGet [<>,{{i\d+}}] public static void foo() { longField = getLongArray()[0]; objectField = getObjectArray()[0]; @@ -56,7 +78,7 @@ public class Main { // elimination pass to add a HDeoptimize. Not having the bounds check helped // the load store elimination think it could merge two ArrayGet with different // types. - String[] array = ((String[])getNull()); + String[] array = (String[])getNull(); objectField = array[0]; objectField = array[1]; objectField = array[2]; @@ -68,6 +90,23 @@ public class Main { longField = longArray[3]; } + /// CHECK-START: float Main.test1() load_store_elimination (after) + /// CHECK-DAG: <> NullConstant + /// CHECK-DAG: <> NullCheck [<>] + /// CHECK-DAG: <> InstanceFieldGet [<>] field_name:Test1.iarr + /// CHECK-DAG: <> NullCheck [<>] + /// CHECK-DAG: <> ArrayGet [<>,{{i\d+}}] + /// CHECK-DAG: <> ArrayGet [<>,{{i\d+}}] + /// CHECK-DAG: Return [<>] + public static float test1() { + Test1 test1 = getNullTest1(); + Test2 test2 = getNullTest2();; + int[] iarr = test1.iarr; + float[] farr = test2.farr; + iarr[0] = iarr[1]; + return farr[0]; + } + public static long longField; public static Object objectField; } diff --git a/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali b/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali index 366c7b9b6..ef53ee867 100644 --- a/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali +++ b/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali @@ -67,3 +67,57 @@ return p3 .end method + +## CHECK-START: int IrreducibleLoop.liveness2(boolean, boolean, boolean, int) builder (after) +## CHECK-DAG: Mul 
loop:<> +## CHECK-DAG: Not loop:<> + +## CHECK-START: int IrreducibleLoop.liveness2(boolean, boolean, boolean, int) liveness (after) +## CHECK-DAG: Mul liveness:<> +## CHECK-DAG: Not liveness:<> +## CHECK-EVAL: <> < <> + +.method public liveness2(ZZZI)I + .registers 10 + + const v1, 1 + + :header1 + if-eqz p0, :body1 + + :exit + return p3 + + :body1 + # The test will generate an incorrect linear order when the following IF swaps + # its successors. To do that, load a boolean value and compare NotEqual to 1. + sget-boolean v2, LIrreducibleLoop;->f:Z + const v3, 1 + if-ne v2, v3, :pre_header2 + + :pre_entry2 + # This constant has a use in a phi in :back_edge2 and a back edge use in + # :back_edge1. Because the linear order is wrong, the back edge use has + # a lower liveness than the phi use. + const v0, 42 + mul-int/2addr p3, p3 + goto :back_edge2 + + :back_edge2 + add-int/2addr p3, v0 + add-int/2addr v0, v1 + goto :header2 + + :header2 + if-eqz p2, :back_edge2 + + :back_edge1 + not-int p3, p3 + goto :header1 + + :pre_header2 + const v0, 42 + goto :header2 +.end method + +.field public static f:Z diff --git a/test/594-invoke-super/expected.txt b/test/594-invoke-super/expected.txt new file mode 100644 index 000000000..de26026c9 --- /dev/null +++ b/test/594-invoke-super/expected.txt @@ -0,0 +1,7 @@ +new A +I am A's foo +new B +I am B's foo +new A +new B +passed diff --git a/test/594-invoke-super/info.txt b/test/594-invoke-super/info.txt new file mode 100644 index 000000000..440d8b8c6 --- /dev/null +++ b/test/594-invoke-super/info.txt @@ -0,0 +1 @@ +Invoke-super on various references. 
diff --git a/test/594-invoke-super/smali/invoke-super.smali b/test/594-invoke-super/smali/invoke-super.smali new file mode 100644 index 000000000..6f787dd17 --- /dev/null +++ b/test/594-invoke-super/smali/invoke-super.smali @@ -0,0 +1,31 @@ +# +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LZ; +.super LA; + +.method public constructor ()V +.registers 1 + invoke-direct {v0}, LA;->()V + return-void +.end method + +.method public foo()V +.registers 3 + new-instance v0, LY; + invoke-direct {v0}, LY;->()V + invoke-super {v0}, LY;->foo()V + return-void +.end method diff --git a/test/594-invoke-super/src/Main.java b/test/594-invoke-super/src/Main.java new file mode 100644 index 000000000..53f2bbf67 --- /dev/null +++ b/test/594-invoke-super/src/Main.java @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +// +// Two classes A and B with method foo(). +// + +class A { + A() { System.out.println("new A"); } + + public void foo() { System.out.println("I am A's foo"); } + + // We previously used to invoke this method with a Y instance, due + // to invoke-super underspecified behavior. + public void bar() { System.out.println("I am A's bar"); } +} + +class B { + B() { System.out.println("new B"); } + + public void foo() { System.out.println("I am B's foo"); } +} + +// +// Two subclasses X and Y that call foo() on super. +// + +class X extends A { + public void foo() { super.foo(); } +} + +class Y extends B { + public void foo() { super.foo(); } +} + +// +// Driver class. +// + +public class Main { + + public static void main(String[] args) throws Exception { + // The normal stuff, X's super goes to A, Y's super goes to B. + new X().foo(); + new Y().foo(); + + // And now it gets interesting. + + // In bytecode, we define a class Z that is a subclass of A, and we call + // invoke-super on an instance of Y. 
+ Class z = Class.forName("Z"); + Method m = z.getMethod("foo"); + try { + m.invoke(z.newInstance()); + throw new Error("Expected InvocationTargetException"); + } catch (InvocationTargetException e) { + if (!(e.getCause() instanceof NoSuchMethodError)) { + throw new Error("Expected NoSuchMethodError"); + } + } + + System.out.println("passed"); + } +} diff --git a/test/594-load-string-regression/expected.txt b/test/594-load-string-regression/expected.txt new file mode 100644 index 000000000..365b0e168 --- /dev/null +++ b/test/594-load-string-regression/expected.txt @@ -0,0 +1 @@ +String: "" diff --git a/test/594-load-string-regression/info.txt b/test/594-load-string-regression/info.txt new file mode 100644 index 000000000..6a07ace81 --- /dev/null +++ b/test/594-load-string-regression/info.txt @@ -0,0 +1,2 @@ +Regression test for LoadString listing side effects when it doesn't have any +and triggering a DCHECK() failure when merging ClinitCheck into NewInstance. diff --git a/test/594-load-string-regression/src/Main.java b/test/594-load-string-regression/src/Main.java new file mode 100644 index 000000000..0b9f7b52a --- /dev/null +++ b/test/594-load-string-regression/src/Main.java @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class Main { + static boolean doThrow = false; + + // Note: We're not doing checker tests as we cannot do them specifically for a non-PIC + // configuration. The check here would be "prepare_for_register_allocation (before)" + // CHECK: LoadClass + // CHECK-NEXT: ClinitCheck + // CHECK-NEXT: LoadString load_kind:BootImageAddress + // CHECK-NEXT: NewInstance + // and "prepare_for_register_allocation (after)" + // CHECK: LoadString + // CHECK-NEXT: NewInstance + // but the order of instructions for non-PIC mode is different. + public static int $noinline$test() { + if (doThrow) { throw new Error(); } + + int r = 0x12345678; + do { + // LICM pulls the LoadClass and ClinitCheck out of the loop, leaves NewInstance in the loop. + Helper h = new Helper(); + // For non-PIC mode, LICM pulls the boot image LoadString out of the loop. + // (For PIC mode, the LoadString can throw and will not be moved out of the loop.) + String s = ""; // Empty string is known to be in the boot image. + r = r ^ (r >> 5); + h.$noinline$printString(s); + // During DCE after inlining, the loop back-edge disappears and the pre-header is + // merged with the body, leaving consecutive LoadClass, ClinitCheck, LoadString + // and NewInstance in non-PIC mode. The prepare_for_register_allocation pass + // merges the LoadClass and ClinitCheck with the NewInstance and checks that + // there are no instructions with side effects in between. This check used to + // fail because LoadString was always listing SideEffects::CanTriggerGC() even + // when it doesn't really have any side effects, i.e. for direct references to + // boot image Strings or for Strings known to be in the dex cache. 
+ } while ($inline$shouldContinue()); + return r; + } + + static boolean $inline$shouldContinue() { + return false; + } + + public static void main(String[] args) { + assertIntEquals(0x12345678 ^ (0x12345678 >> 5), $noinline$test()); + } + + public static void assertIntEquals(int expected, int result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } +} + +class Helper { + static boolean doThrow = false; + + public void $noinline$printString(String s) { + if (doThrow) { throw new Error(); } + + System.out.println("String: \"" + s + "\""); + } +} diff --git a/test/595-error-class/expected.txt b/test/595-error-class/expected.txt new file mode 100644 index 000000000..b0aad4deb --- /dev/null +++ b/test/595-error-class/expected.txt @@ -0,0 +1 @@ +passed diff --git a/test/595-error-class/info.txt b/test/595-error-class/info.txt new file mode 100644 index 000000000..a58b8b31b --- /dev/null +++ b/test/595-error-class/info.txt @@ -0,0 +1 @@ +Regression test on merging array type with error component type. diff --git a/test/595-error-class/smali/error.smali b/test/595-error-class/smali/error.smali new file mode 100644 index 000000000..925c34b29 --- /dev/null +++ b/test/595-error-class/smali/error.smali @@ -0,0 +1,23 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +.class public final LAnError; + +.super LSuperOfAnError; + +# Override a final method to put this class in the error state. +.method public foo()V + .registers 1 + return-void +.end method diff --git a/test/595-error-class/smali/merge.smali b/test/595-error-class/smali/merge.smali new file mode 100644 index 000000000..2f8b41504 --- /dev/null +++ b/test/595-error-class/smali/merge.smali @@ -0,0 +1,31 @@ +# +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMerge; +.super Ljava/lang/Object; + +# Method that selects between x = new Integer[] or new AnError[], +# Reference type propagation should correctly see error in component type. +.method public static select(Z)Ljava/lang/Object; + .registers 2 + const/16 v0, 10 + if-eqz v1, :Skip + new-array v0, v0, [LAnError; + goto :Done +:Skip + new-array v0, v0, [Ljava/lang/Integer; +:Done + return-object v0 +.end method diff --git a/test/595-error-class/smali/super.smali b/test/595-error-class/smali/super.smali new file mode 100644 index 000000000..da7467d16 --- /dev/null +++ b/test/595-error-class/smali/super.smali @@ -0,0 +1,22 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LSuperOfAnError; + +.super Ljava/lang/Object; + +.method public final foo()V + .registers 1 + return-void +.end method diff --git a/test/595-error-class/src/Main.java b/test/595-error-class/src/Main.java new file mode 100644 index 000000000..655fa4336 --- /dev/null +++ b/test/595-error-class/src/Main.java @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.lang.reflect.*; + +public class Main { + + public static void main(String args[]) throws Throwable { + Class c = Class.forName("Merge"); + Method m = c.getMethod("select", boolean.class); + Object x = m.invoke(null, true); + if (x == null) { + throw new Error("Did not get array"); + } + System.out.println("passed"); + } +} diff --git a/test/595-profile-saving/expected.txt b/test/595-profile-saving/expected.txt new file mode 100644 index 000000000..6a5618ebc --- /dev/null +++ b/test/595-profile-saving/expected.txt @@ -0,0 +1 @@ +JNI_OnLoad called diff --git a/test/595-profile-saving/info.txt b/test/595-profile-saving/info.txt new file mode 100644 index 000000000..5d318f5b1 --- /dev/null +++ b/test/595-profile-saving/info.txt @@ -0,0 +1 @@ +Check that profile recording works even when JIT compilation is not enabled. diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc new file mode 100644 index 000000000..0d26f454c --- /dev/null +++ b/test/595-profile-saving/profile-saving.cc @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "dex_file.h" + +#include "art_method-inl.h" +#include "jit/offline_profiling_info.h" +#include "jit/profile_saver.h" +#include "jni.h" +#include "method_reference.h" +#include "mirror/class-inl.h" +#include "oat_file_assistant.h" +#include "oat_file_manager.h" +#include "scoped_thread_state_change.h" +#include "ScopedUtfChars.h" +#include "thread.h" + +namespace art { +namespace { + +class CreateProfilingInfoVisitor : public StackVisitor { + public: + explicit CreateProfilingInfoVisitor(Thread* thread, const char* method_name) + SHARED_REQUIRES(Locks::mutator_lock_) + : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), + method_name_(method_name) {} + + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { + ArtMethod* m = GetMethod(); + std::string m_name(m->GetName()); + + if (m_name.compare(method_name_) == 0) { + ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true); + method_index_ = m->GetDexMethodIndex(); + return false; + } + return true; + } + + int method_index_ = -1; + const char* const method_name_; +}; + +extern "C" JNIEXPORT jint JNICALL Java_Main_ensureProfilingInfo(JNIEnv* env, + jclass, + jstring method_name) { + ScopedUtfChars chars(env, method_name); + CHECK(chars.c_str() != nullptr); + ScopedObjectAccess soa(Thread::Current()); + CreateProfilingInfoVisitor visitor(soa.Self(), chars.c_str()); + visitor.WalkStack(); + return visitor.method_index_; +} + +extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfileProcessing(JNIEnv*, jclass) { + ProfileSaver::ForceProcessProfiles(); +} + +extern "C" JNIEXPORT jboolean JNICALL Java_Main_presentInProfile( + JNIEnv* env, jclass cls, jstring filename, jint method_index) { + ScopedUtfChars filename_chars(env, filename); + CHECK(filename_chars.c_str() != nullptr); + ScopedObjectAccess soa(Thread::Current()); + const DexFile* dex_file = soa.Decode(cls)->GetDexCache()->GetDexFile(); + return 
ProfileSaver::HasSeenMethod(std::string(filename_chars.c_str()), + dex_file, + static_cast(method_index)); +} + +} // namespace +} // namespace art diff --git a/test/595-profile-saving/run b/test/595-profile-saving/run new file mode 100644 index 000000000..068ad03ce --- /dev/null +++ b/test/595-profile-saving/run @@ -0,0 +1,27 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Use +# --compiler-filter=interpret-only to make sure that the test is not compiled AOT +# and to make sure the test is not compiled when loaded (by PathClassLoader) +# -Xjitsaveprofilinginfo to enable profile saving +# -Xusejit:false to disable jit and only test profiles. +exec ${RUN} \ + -Xcompiler-option --compiler-filter=interpret-only \ + --runtime-option '-Xcompiler-option --compiler-filter=interpret-only' \ + --runtime-option -Xjitsaveprofilinginfo \ + --runtime-option -Xusejit:false \ + "${@}" diff --git a/test/595-profile-saving/src/Main.java b/test/595-profile-saving/src/Main.java new file mode 100644 index 000000000..039503f7a --- /dev/null +++ b/test/595-profile-saving/src/Main.java @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Method; + +public class Main { + + public static void main(String[] args) throws Exception { + System.loadLibrary(args[0]); + + File file = null; + try { + file = createTempFile(); + // String codePath = getDexBaseLocation(); + String codePath = System.getenv("DEX_LOCATION") + "/595-profile-saving.jar"; + VMRuntime.registerAppInfo(file.getPath(), + System.getenv("DEX_LOCATION"), + new String[] {codePath}, + /* foreignProfileDir */ null); + + int methodIdx = $opt$noinline$testProfile(); + ensureProfileProcessing(); + if (!presentInProfile(file.getPath(), methodIdx)) { + throw new RuntimeException("Method with index " + methodIdx + " not in the profile"); + } + } finally { + if (file != null) { + file.delete(); + } + } + } + + public static int $opt$noinline$testProfile() { + if (doThrow) throw new Error(); + // Make sure we have a profile info for this method without the need to loop. + return ensureProfilingInfo("$opt$noinline$testProfile"); + } + + // Return the dex method index. + public static native int ensureProfilingInfo(String methodName); + // Ensures the profile saver does its usual processing. + public static native void ensureProfileProcessing(); + // Checks if the profiles saver knows about the method. 
+ public static native boolean presentInProfile(String profile, int methodIdx); + + public static boolean doThrow = false; + private static final String TEMP_FILE_NAME_PREFIX = "dummy"; + private static final String TEMP_FILE_NAME_SUFFIX = "-file"; + + static native String getProfileInfoDump( + String filename); + + private static File createTempFile() throws Exception { + try { + return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX); + } catch (IOException e) { + System.setProperty("java.io.tmpdir", "/data/local/tmp"); + try { + return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX); + } catch (IOException e2) { + System.setProperty("java.io.tmpdir", "/sdcard"); + return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX); + } + } + } + + private static class VMRuntime { + private static final Method registerAppInfoMethod; + static { + try { + Class c = Class.forName("dalvik.system.VMRuntime"); + registerAppInfoMethod = c.getDeclaredMethod("registerAppInfo", + String.class, String.class, String[].class, String.class); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static void registerAppInfo(String profile, String appDir, + String[] codePaths, String foreignDir) throws Exception { + registerAppInfoMethod.invoke(null, profile, appDir, codePaths, foreignDir); + } + } +} diff --git a/test/596-app-images/app_images.cc b/test/596-app-images/app_images.cc new file mode 100644 index 000000000..a5bbf5fba --- /dev/null +++ b/test/596-app-images/app_images.cc @@ -0,0 +1,68 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include "gc/heap.h" +#include "gc/space/image_space.h" +#include "gc/space/space-inl.h" +#include "image.h" +#include "jni.h" +#include "mirror/class.h" +#include "runtime.h" +#include "scoped_thread_state_change.h" + +namespace art { + +namespace { + +extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkAppImageLoaded(JNIEnv*, jclass) { + ScopedObjectAccess soa(Thread::Current()); + for (auto* space : Runtime::Current()->GetHeap()->GetContinuousSpaces()) { + if (space->IsImageSpace()) { + auto* image_space = space->AsImageSpace(); + const auto& image_header = image_space->GetImageHeader(); + if (image_header.IsAppImage()) { + return JNI_TRUE; + } + } + } + return JNI_FALSE; +} + +extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkAppImageContains(JNIEnv*, jclass, jclass c) { + ScopedObjectAccess soa(Thread::Current()); + mirror::Class* klass_ptr = soa.Decode(c); + for (auto* space : Runtime::Current()->GetHeap()->GetContinuousSpaces()) { + if (space->IsImageSpace()) { + auto* image_space = space->AsImageSpace(); + const auto& image_header = image_space->GetImageHeader(); + if (image_header.IsAppImage()) { + if (image_space->HasAddress(klass_ptr)) { + return JNI_TRUE; + } + } + } + } + return JNI_FALSE; +} + +} // namespace + +} // namespace art diff --git a/test/596-app-images/expected.txt b/test/596-app-images/expected.txt new file mode 100644 index 000000000..6a5618ebc --- /dev/null +++ b/test/596-app-images/expected.txt @@ -0,0 +1 @@ +JNI_OnLoad called diff --git 
a/test/596-app-images/info.txt b/test/596-app-images/info.txt new file mode 100644 index 000000000..a3d5e7ea7 --- /dev/null +++ b/test/596-app-images/info.txt @@ -0,0 +1 @@ +Tests that app-images are loaded and used. diff --git a/test/596-app-images/src/Main.java b/test/596-app-images/src/Main.java new file mode 100644 index 000000000..75b31b806 --- /dev/null +++ b/test/596-app-images/src/Main.java @@ -0,0 +1,33 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +class Main { + static class Inner { + public static int abc = 0; + } + + public static void main(String[] args) { + System.loadLibrary(args[0]); + if (!checkAppImageLoaded()) { + System.out.println("App image is not loaded!"); + } else if (!checkAppImageContains(Inner.class)) { + System.out.println("App image does not contain Inner!"); + } + } + + public static native boolean checkAppImageLoaded(); + public static native boolean checkAppImageContains(Class klass); +} diff --git a/test/596-checker-dead-phi/expected.txt b/test/596-checker-dead-phi/expected.txt new file mode 100644 index 000000000..d81cc0710 --- /dev/null +++ b/test/596-checker-dead-phi/expected.txt @@ -0,0 +1 @@ +42 diff --git a/test/596-checker-dead-phi/info.txt b/test/596-checker-dead-phi/info.txt new file mode 100644 index 000000000..7f7cf0f9e --- /dev/null +++ b/test/596-checker-dead-phi/info.txt @@ -0,0 +1,2 @@ +Regression test for optimizing where we used to replace a dead loop +phi with its first incoming input. diff --git a/test/596-checker-dead-phi/smali/IrreducibleLoop.smali b/test/596-checker-dead-phi/smali/IrreducibleLoop.smali new file mode 100644 index 000000000..bab2ba99c --- /dev/null +++ b/test/596-checker-dead-phi/smali/IrreducibleLoop.smali @@ -0,0 +1,74 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +.class public LIrreducibleLoop; + +.super Ljava/lang/Object; + +# Test case where liveness analysis produces linear order where loop blocks are +# not adjacent. This revealed a bug in our SSA builder, where a dead loop phi would +# be replaced by its incoming input during SsaRedundantPhiElimination. + +# Check that the outer loop suspend check environment only has the parameter vreg. +## CHECK-START: int IrreducibleLoop.liveness(int) builder (after) +## CHECK-DAG: <> Phi reg:4 loop:{{B\d+}} irreducible:false +## CHECK-DAG: SuspendCheck env:[[_,_,_,_,<>]] loop:{{B\d+}} irreducible:false + +# Check that the linear order has non-adjacent loop blocks. +## CHECK-START: int IrreducibleLoop.liveness(int) liveness (after) +## CHECK-DAG: Mul liveness:<> +## CHECK-DAG: Add liveness:<> +## CHECK-EVAL: <> < <> + +.method public static liveness(I)I + .registers 5 + + const-string v1, "MyString" + + :header1 + if-eqz p0, :body1 + + :exit + return p0 + + :body1 + # The test will generate an incorrect linear order when the following IF swaps + # its successors. To do that, load a boolean value and compare NotEqual to 1. + sget-boolean v2, LIrreducibleLoop;->f:Z + const v3, 1 + if-ne v2, v3, :pre_header2 + + :pre_entry2 + # Add a marker on the irreducible loop entry. + mul-int/2addr p0, p0 + goto :back_edge2 + + :back_edge2 + goto :header2 + + :header2 + if-eqz p0, :back_edge2 + + :back_edge1 + # Add a marker on the outer loop back edge. + add-int/2addr p0, p0 + # Set a wide register, to have v1 undefined at the back edge. 
+ const-wide/16 v0, 0x1 + goto :header1 + + :pre_header2 + goto :header2 +.end method + +.field public static f:Z diff --git a/test/596-checker-dead-phi/src/Main.java b/test/596-checker-dead-phi/src/Main.java new file mode 100644 index 000000000..5a3fffc8f --- /dev/null +++ b/test/596-checker-dead-phi/src/Main.java @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Method; + +public class Main { + // Workaround for b/18051191. + class InnerClass {} + + public static void main(String[] args) throws Exception { + Class c = Class.forName("IrreducibleLoop"); + // Note that we don't actually enter the loops in the 'liveness' + // method, so this is just a sanity check that part of the code we + // generated for that method is correct. + Method m = c.getMethod("liveness", int.class); + Object[] arguments = { 42 }; + System.out.println(m.invoke(null, arguments)); + } +} diff --git a/test/597-deopt-new-string/deopt.cc b/test/597-deopt-new-string/deopt.cc new file mode 100644 index 000000000..844a786e1 --- /dev/null +++ b/test/597-deopt-new-string/deopt.cc @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "jni.h" +#include "mirror/class-inl.h" +#include "runtime.h" +#include "thread_list.h" +#include "thread_state.h" +#include "gc/gc_cause.h" +#include "gc/scoped_gc_critical_section.h" + +namespace art { + +extern "C" JNIEXPORT void JNICALL Java_Main_deoptimizeAll( + JNIEnv* env, + jclass cls ATTRIBUTE_UNUSED) { + ScopedObjectAccess soa(env); + ScopedThreadSuspension sts(Thread::Current(), kWaitingForDeoptimization); + gc::ScopedGCCriticalSection gcs(Thread::Current(), + gc::kGcCauseInstrumentation, + gc::kCollectorTypeInstrumentation); + // We need to suspend mutator threads first. + ScopedSuspendAll ssa(__FUNCTION__); + static bool first = true; + if (first) { + // We need to enable deoptimization once in order to call DeoptimizeEverything(). + Runtime::Current()->GetInstrumentation()->EnableDeoptimization(); + first = false; + } + Runtime::Current()->GetInstrumentation()->DeoptimizeEverything("test"); +} + +extern "C" JNIEXPORT void JNICALL Java_Main_undeoptimizeAll( + JNIEnv* env, + jclass cls ATTRIBUTE_UNUSED) { + ScopedObjectAccess soa(env); + ScopedThreadSuspension sts(Thread::Current(), kWaitingForDeoptimization); + gc::ScopedGCCriticalSection gcs(Thread::Current(), + gc::kGcCauseInstrumentation, + gc::kCollectorTypeInstrumentation); + // We need to suspend mutator threads first. 
+ ScopedSuspendAll ssa(__FUNCTION__); + Runtime::Current()->GetInstrumentation()->UndeoptimizeEverything("test"); +} + +} // namespace art diff --git a/test/597-deopt-new-string/expected.txt b/test/597-deopt-new-string/expected.txt new file mode 100644 index 000000000..f993efcda --- /dev/null +++ b/test/597-deopt-new-string/expected.txt @@ -0,0 +1,2 @@ +JNI_OnLoad called +Finishing diff --git a/test/597-deopt-new-string/info.txt b/test/597-deopt-new-string/info.txt new file mode 100644 index 000000000..1bd1f79c0 --- /dev/null +++ b/test/597-deopt-new-string/info.txt @@ -0,0 +1 @@ +Regression test for b/28555675 diff --git a/test/597-deopt-new-string/run b/test/597-deopt-new-string/run new file mode 100644 index 000000000..9776ab3eb --- /dev/null +++ b/test/597-deopt-new-string/run @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# We want to run in debuggable mode which keeps the call into StringFactory.newEmptyString(). 
+exec ${RUN} -Xcompiler-option --debuggable "${@}" diff --git a/test/597-deopt-new-string/src/Main.java b/test/597-deopt-new-string/src/Main.java new file mode 100644 index 000000000..1224e407b --- /dev/null +++ b/test/597-deopt-new-string/src/Main.java @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main implements Runnable { + static final int numberOfThreads = 2; + static final int totalOperations = 40000; + static boolean sFlag = false; + static volatile boolean done = false; + int threadIndex; + + public static native void deoptimizeAll(); + public static native void undeoptimizeAll(); + + Main(int index) { + threadIndex = index; + } + + public static void main(String[] args) throws Exception { + System.loadLibrary(args[0]); + + final Thread[] threads = new Thread[numberOfThreads]; + for (int t = 0; t < threads.length; t++) { + threads[t] = new Thread(new Main(t)); + threads[t].start(); + } + for (Thread t : threads) { + t.join(); + } + System.out.println("Finishing"); + } + + public String $noinline$run0() { + // Prevent inlining. + if (sFlag) { + throw new Error(); + } + char[] arr = {'a', 'b', 'c'}; + return new String(arr, 0, arr.length); + } + + public void run() { + if (threadIndex == 0) { + // This thread keeps doing deoptimization of all threads. 
+ // Hopefully that will trigger one deoptimization when returning from + // StringFactory.newEmptyString() in one of the other threads. + for (int i = 0; i < totalOperations; ++i) { + if (i % 50 == 0) { + deoptimizeAll(); + } + if (i % 50 == 25) { + undeoptimizeAll(); + } + } + done = true; + } else { + // This thread keeps doing new String() from a char array. + while (!done) { + $noinline$run0(); + } + } + } +} diff --git a/test/598-checker-irreducible-dominance/expected.txt b/test/598-checker-irreducible-dominance/expected.txt new file mode 100644 index 000000000..e69de29bb diff --git a/test/598-checker-irreducible-dominance/info.txt b/test/598-checker-irreducible-dominance/info.txt new file mode 100644 index 000000000..8ca4e63be --- /dev/null +++ b/test/598-checker-irreducible-dominance/info.txt @@ -0,0 +1,2 @@ +Regression test for HGraphBuilder which would compute wrong dominance information +in the presence of irreducible loops. \ No newline at end of file diff --git a/test/598-checker-irreducible-dominance/smali/IrreducibleLoop.smali b/test/598-checker-irreducible-dominance/smali/IrreducibleLoop.smali new file mode 100644 index 000000000..4d8b5152f --- /dev/null +++ b/test/598-checker-irreducible-dominance/smali/IrreducibleLoop.smali @@ -0,0 +1,52 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +.class public LIrreducibleLoop; +.super Ljava/lang/Object; + +# Test case in which `inner_back_edge` is not dominated by `inner_header` and +# causes `outer_back_edge` to not be dominated by `outer_header`. HGraphBuilder +# not do a fix-point iteration and would miss the path to `outer_back_edge` +# through `inner_back_edge` and incorrectly label the outer loop non-irreducible. + +## CHECK-START: int IrreducibleLoop.dominance(int) builder (after) +## CHECK: Add irreducible:true + +.method public static dominance(I)I + .registers 2 + + if-eqz p0, :outer_header + goto :inner_back_edge + + :outer_header + if-eqz p0, :inner_header + + :outer_branch_exit + if-eqz p0, :outer_merge + return p0 + + :inner_header + goto :outer_merge + + :inner_back_edge + goto :inner_header + + :outer_merge + if-eqz p0, :inner_back_edge + + :outer_back_edge + add-int/2addr p0, p0 + goto :outer_header + +.end method diff --git a/test/598-checker-irreducible-dominance/src/Main.java b/test/598-checker-irreducible-dominance/src/Main.java new file mode 100644 index 000000000..38b2ab438 --- /dev/null +++ b/test/598-checker-irreducible-dominance/src/Main.java @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + // Workaround for b/18051191. + class InnerClass {} + + public static void main(String[] args) { + // Nothing to run. 
This regression test merely makes sure the smali test + // case successfully compiles. + } +} diff --git a/test/599-checker-irreducible-loop/expected.txt b/test/599-checker-irreducible-loop/expected.txt new file mode 100644 index 000000000..573541ac9 --- /dev/null +++ b/test/599-checker-irreducible-loop/expected.txt @@ -0,0 +1 @@ +0 diff --git a/test/599-checker-irreducible-loop/info.txt b/test/599-checker-irreducible-loop/info.txt new file mode 100644 index 000000000..1e0dd0228 --- /dev/null +++ b/test/599-checker-irreducible-loop/info.txt @@ -0,0 +1,2 @@ +Regression test for optimizing in the presence of +an irreducible loop. diff --git a/test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali new file mode 100644 index 000000000..5331fd6a3 --- /dev/null +++ b/test/599-checker-irreducible-loop/smali/IrreducibleLoop.smali @@ -0,0 +1,56 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +.class public LIrreducibleLoop; + +.super Ljava/lang/Object; + +## CHECK-START: int IrreducibleLoop.test(int) GVN (before) +## CHECK-DAG: LoadClass loop:none +## CHECK-DAG: LoadClass loop:{{B\d+}} outer_loop:none + +## CHECK-START: int IrreducibleLoop.test(int) GVN (after) +## CHECK-DAG: LoadClass loop:none +## CHECK-DAG: LoadClass loop:{{B\d+}} outer_loop:none +.method public static test(I)I + .registers 2 + + sget v0, LIrreducibleLoop;->field1:I + sput v0, LIrreducibleLoop;->field2:I + + if-eqz p0, :loop_entry + goto :exit + + :loop_entry + if-eqz p0, :irreducible_loop_entry + sget v0, LIrreducibleLoop;->field2:I + sput v0, LIrreducibleLoop;->field1:I + if-eqz v0, :exit + goto :irreducible_other_loop_entry + + :irreducible_loop_entry + if-eqz p0, :loop_back_edge + :irreducible_other_loop_entry + if-eqz v0, :loop_back_edge + goto :irreducible_loop_entry + + :loop_back_edge + goto :loop_entry + + :exit + return v0 +.end method + +.field public static field1:I +.field public static field2:I diff --git a/test/599-checker-irreducible-loop/src/Main.java b/test/599-checker-irreducible-loop/src/Main.java new file mode 100644 index 000000000..b47721f72 --- /dev/null +++ b/test/599-checker-irreducible-loop/src/Main.java @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Method; + +public class Main { + // Workaround for b/18051191. 
+ class InnerClass {} + + public static void main(String[] args) throws Exception { + Class c = Class.forName("IrreducibleLoop"); + Method m = c.getMethod("test", int.class); + Object[] arguments = { 42 }; + // Invoke the code just for sanity checking. + System.out.println(m.invoke(null, arguments)); + } +} diff --git a/test/800-smali/smali/b_28187158.smali b/test/800-smali/smali/b_28187158.smali index 14d5cec7e..47e5ef64f 100644 --- a/test/800-smali/smali/b_28187158.smali +++ b/test/800-smali/smali/b_28187158.smali @@ -9,4 +9,3 @@ iget v0, p0, Ljava/lang/System;->in:Ljava/io/InputStream; return-void .end method - diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java index c883b7f0f..b2fc00562 100644 --- a/test/800-smali/src/Main.java +++ b/test/800-smali/src/Main.java @@ -174,7 +174,7 @@ public class Main { testCases.add(new TestCase("b/27799205 (5)", "B27799205Helper", "run5", null, new VerifyError(), null)); testCases.add(new TestCase("b/27799205 (6)", "B27799205Helper", "run6", null, null, null)); - testCases.add(new TestCase("b/28187158", "B28187158", "run", new Object[] { null} , + testCases.add(new TestCase("b/28187158", "B28187158", "run", new Object[] { null }, new VerifyError(), null)); } diff --git a/test/803-no-super/expected.txt b/test/803-no-super/expected.txt new file mode 100644 index 000000000..503699139 --- /dev/null +++ b/test/803-no-super/expected.txt @@ -0,0 +1,2 @@ +java.lang.ClassNotFoundException: NoSuper1 +Done! diff --git a/test/803-no-super/info.txt b/test/803-no-super/info.txt new file mode 100644 index 000000000..0178a446e --- /dev/null +++ b/test/803-no-super/info.txt @@ -0,0 +1,3 @@ +Regression test that temp (erroneous) classes don't get conflict tables created. + +Obviously needs to run under Dalvik or ART. 
diff --git a/test/803-no-super/smali/nosuper1.smali b/test/803-no-super/smali/nosuper1.smali new file mode 100644 index 000000000..df2eaa5ca --- /dev/null +++ b/test/803-no-super/smali/nosuper1.smali @@ -0,0 +1,3 @@ +.class public LNoSuper1; + +.super LNoClass; diff --git a/test/803-no-super/src/Main.java b/test/803-no-super/src/Main.java new file mode 100644 index 000000000..a07e042c3 --- /dev/null +++ b/test/803-no-super/src/Main.java @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Attempt to load class with no superclass. + */ +public class Main { + public static void main(String[] args) throws Exception { + try { + Class c = Class.forName("NoSuper1"); + } catch (Exception e) { + System.out.println(e); + } + System.out.println("Done!"); + } +} diff --git a/test/804-class-extends-itself/expected.txt b/test/804-class-extends-itself/expected.txt new file mode 100644 index 000000000..b98f963ce --- /dev/null +++ b/test/804-class-extends-itself/expected.txt @@ -0,0 +1,2 @@ +Caught ClassCircularityError +Done! diff --git a/test/804-class-extends-itself/info.txt b/test/804-class-extends-itself/info.txt new file mode 100644 index 000000000..c48934c21 --- /dev/null +++ b/test/804-class-extends-itself/info.txt @@ -0,0 +1 @@ +Exercise class linker check for classes extending themselves (b/28685551). 
diff --git a/test/804-class-extends-itself/smali/Main.smali b/test/804-class-extends-itself/smali/Main.smali new file mode 100644 index 000000000..5c349edcc --- /dev/null +++ b/test/804-class-extends-itself/smali/Main.smali @@ -0,0 +1,57 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# We cannot implement Main in Java, as this would require to run +# dexmerger (to merge the Dex file produced from Smali code and the +# Dex file produced from Java code), which loops indefinitely when +# processing class B28685551, as this class inherits from itself. As +# a workaround, implement Main using Smali (we could also have used +# multidex, but this requires a custom build script). + +.class public LMain; +.super Ljava/lang/Object; + +.method public static main([Ljava/lang/String;)V + .registers 3 + .param p0, "args" + + invoke-static {}, LMain;->test()V + sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream; + const-string v1, "Done!" + invoke-virtual {v0, v1}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + return-void +.end method + +.method static test()V + .registers 4 + + :try_start + const-string v2, "B28685551" + invoke-static {v2}, Ljava/lang/Class;->forName(Ljava/lang/String;)Ljava/lang/Class; + :try_end + .catch Ljava/lang/ClassCircularityError; {:try_start .. 
:try_end} :catch + + move-result-object v0 + + :goto_7 + return-void + + :catch + move-exception v1 + .local v1, "e":Ljava/lang/ClassCircularityError; + sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream; + const-string v3, "Caught ClassCircularityError" + invoke-virtual {v2, v3}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + goto :goto_7 +.end method diff --git a/test/804-class-extends-itself/smali/b_28685551.smali b/test/804-class-extends-itself/smali/b_28685551.smali new file mode 100644 index 000000000..d98c6e3b3 --- /dev/null +++ b/test/804-class-extends-itself/smali/b_28685551.smali @@ -0,0 +1,18 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Regression test for a class inheriting from itself. + +.class public LB28685551; +.super LB28685551; diff --git a/test/960-default-smali/expected.txt b/test/960-default-smali/expected.txt index 7671eed5d..f3db93f87 100644 --- a/test/960-default-smali/expected.txt +++ b/test/960-default-smali/expected.txt @@ -82,3 +82,19 @@ J-virtual A.SayHiTwice()='Hi Hi ' J-interface Greeter.SayHiTwice()='Hi Hi ' J-virtual J.SayHiTwice()='Hi Hi ' End testing for type J +Testing for type K +K-interface Foo.bar()='foobar' +K-virtual K.bar()='foobar' +End testing for type K +Testing for type L +L-interface Foo.bar()='foobar' +L-virtual K.bar()='foobar' +L-virtual L.bar()='foobar' +End testing for type L +Testing for type M +M-interface Foo.bar()='BAZ!' 
+M-interface Fooer.bar()='BAZ!' +M-virtual K.bar()='BAZ!' +M-virtual L.bar()='BAZ!' +M-virtual M.bar()='BAZ!' +End testing for type M diff --git a/test/960-default-smali/src/Foo.java b/test/960-default-smali/src/Foo.java new file mode 100644 index 000000000..ed5b35f47 --- /dev/null +++ b/test/960-default-smali/src/Foo.java @@ -0,0 +1,20 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +interface Foo { + public default String bar() { + return "foobar"; + } +} diff --git a/test/960-default-smali/src/Fooer.java b/test/960-default-smali/src/Fooer.java new file mode 100644 index 000000000..d8a5f6163 --- /dev/null +++ b/test/960-default-smali/src/Fooer.java @@ -0,0 +1,19 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +interface Fooer extends Foo { + public String bar(); +} diff --git a/test/960-default-smali/src/K.java b/test/960-default-smali/src/K.java new file mode 100644 index 000000000..4426be719 --- /dev/null +++ b/test/960-default-smali/src/K.java @@ -0,0 +1,17 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class K implements Foo { } diff --git a/test/960-default-smali/src/L.java b/test/960-default-smali/src/L.java new file mode 100644 index 000000000..c08ab72a9 --- /dev/null +++ b/test/960-default-smali/src/L.java @@ -0,0 +1,17 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +class L extends K { } diff --git a/test/960-default-smali/src/M.java b/test/960-default-smali/src/M.java new file mode 100644 index 000000000..affe7e9c9 --- /dev/null +++ b/test/960-default-smali/src/M.java @@ -0,0 +1,21 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class M extends L implements Fooer { + public String bar() { + return "BAZ!"; + } +} diff --git a/test/960-default-smali/src/classes.xml b/test/960-default-smali/src/classes.xml index 0aa41f7fb..f3e50c570 100644 --- a/test/960-default-smali/src/classes.xml +++ b/test/960-default-smali/src/classes.xml @@ -81,6 +81,27 @@ + + + + Foo + + + + + + + + + + + + Fooer + + + bar + + @@ -123,5 +144,22 @@ GetPlace + + + + + + bar + + + + + + Foo + + + bar + + diff --git a/test/976-conflict-no-methods/expected.txt b/test/976-conflict-no-methods/expected.txt new file mode 100644 index 000000000..656dfc57d --- /dev/null +++ b/test/976-conflict-no-methods/expected.txt @@ -0,0 +1 @@ +Pass diff --git a/test/976-conflict-no-methods/info.txt b/test/976-conflict-no-methods/info.txt new file mode 100644 index 000000000..cdc314903 --- /dev/null +++ b/test/976-conflict-no-methods/info.txt @@ -0,0 +1 @@ +Regression test for classes that have conflict tables but no methods. 
b/28707801 \ No newline at end of file diff --git a/test/976-conflict-no-methods/smali/Iface.smali b/test/976-conflict-no-methods/smali/Iface.smali new file mode 100644 index 000000000..aa4ec3768 --- /dev/null +++ b/test/976-conflict-no-methods/smali/Iface.smali @@ -0,0 +1,281 @@ +# /* +# * Copyright (C) 2016 The Android Open Source Project +# * +# * Licensed under the Apache License, Version 2.0 (the "License"); +# * you may not use this file except in compliance with the License. +# * You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. +# */ +# +# public interface Iface2 { +# public void abstractMethod0(); +# public void abstractMethod1(); +# public void abstractMethod2(); +# public void abstractMethod3(); +# public void abstractMethod4(); +# public void abstractMethod5(); +# public void abstractMethod6(); +# public void abstractMethod7(); +# public void abstractMethod8(); +# public void abstractMethod9(); +# public void abstractMethod10(); +# public void abstractMethod11(); +# public void abstractMethod12(); +# public void abstractMethod13(); +# public void abstractMethod14(); +# public void abstractMethod15(); +# public void abstractMethod16(); +# public void abstractMethod17(); +# public void abstractMethod18(); +# public void abstractMethod19(); +# public void abstractMethod20(); +# public void abstractMethod21(); +# public void abstractMethod22(); +# public void abstractMethod23(); +# public void abstractMethod24(); +# public void abstractMethod25(); +# public void abstractMethod26(); +# public void abstractMethod27(); +# public void abstractMethod28(); +# public void abstractMethod29(); 
+# public void abstractMethod30(); +# public void abstractMethod31(); +# public void abstractMethod32(); +# public void abstractMethod33(); +# public void abstractMethod34(); +# public void abstractMethod35(); +# public void abstractMethod36(); +# public void abstractMethod37(); +# public void abstractMethod38(); +# public void abstractMethod39(); +# public void abstractMethod40(); +# public void abstractMethod41(); +# public void abstractMethod42(); +# public void abstractMethod43(); +# public void abstractMethod44(); +# public void abstractMethod45(); +# public void abstractMethod46(); +# public void abstractMethod47(); +# public void abstractMethod48(); +# public void abstractMethod49(); +# public void abstractMethod50(); +# public void abstractMethod51(); +# public void abstractMethod52(); +# public void abstractMethod53(); +# public void abstractMethod54(); +# public void abstractMethod55(); +# public void abstractMethod56(); +# public void abstractMethod57(); +# public void abstractMethod58(); +# public void abstractMethod59(); +# public void abstractMethod60(); +# public void abstractMethod61(); +# public void abstractMethod62(); +# public void abstractMethod63(); +# public void abstractMethod64(); +# } + +.class public abstract interface LIface; +.super Ljava/lang/Object; + +.method public abstract abstractMethod0()V +.end method + +.method public abstract abstractMethod1()V +.end method + +.method public abstract abstractMethod2()V +.end method + +.method public abstract abstractMethod3()V +.end method + +.method public abstract abstractMethod4()V +.end method + +.method public abstract abstractMethod5()V +.end method + +.method public abstract abstractMethod6()V +.end method + +.method public abstract abstractMethod7()V +.end method + +.method public abstract abstractMethod8()V +.end method + +.method public abstract abstractMethod9()V +.end method + +.method public abstract abstractMethod10()V +.end method + +.method public abstract abstractMethod11()V 
+.end method + +.method public abstract abstractMethod12()V +.end method + +.method public abstract abstractMethod13()V +.end method + +.method public abstract abstractMethod14()V +.end method + +.method public abstract abstractMethod15()V +.end method + +.method public abstract abstractMethod16()V +.end method + +.method public abstract abstractMethod17()V +.end method + +.method public abstract abstractMethod18()V +.end method + +.method public abstract abstractMethod19()V +.end method + +.method public abstract abstractMethod20()V +.end method + +.method public abstract abstractMethod21()V +.end method + +.method public abstract abstractMethod22()V +.end method + +.method public abstract abstractMethod23()V +.end method + +.method public abstract abstractMethod24()V +.end method + +.method public abstract abstractMethod25()V +.end method + +.method public abstract abstractMethod26()V +.end method + +.method public abstract abstractMethod27()V +.end method + +.method public abstract abstractMethod28()V +.end method + +.method public abstract abstractMethod29()V +.end method + +.method public abstract abstractMethod30()V +.end method + +.method public abstract abstractMethod31()V +.end method + +.method public abstract abstractMethod32()V +.end method + +.method public abstract abstractMethod33()V +.end method + +.method public abstract abstractMethod34()V +.end method + +.method public abstract abstractMethod35()V +.end method + +.method public abstract abstractMethod36()V +.end method + +.method public abstract abstractMethod37()V +.end method + +.method public abstract abstractMethod38()V +.end method + +.method public abstract abstractMethod39()V +.end method + +.method public abstract abstractMethod40()V +.end method + +.method public abstract abstractMethod41()V +.end method + +.method public abstract abstractMethod42()V +.end method + +.method public abstract abstractMethod43()V +.end method + +.method public abstract abstractMethod44()V +.end method + 
+.method public abstract abstractMethod45()V +.end method + +.method public abstract abstractMethod46()V +.end method + +.method public abstract abstractMethod47()V +.end method + +.method public abstract abstractMethod48()V +.end method + +.method public abstract abstractMethod49()V +.end method + +.method public abstract abstractMethod50()V +.end method + +.method public abstract abstractMethod51()V +.end method + +.method public abstract abstractMethod52()V +.end method + +.method public abstract abstractMethod53()V +.end method + +.method public abstract abstractMethod54()V +.end method + +.method public abstract abstractMethod55()V +.end method + +.method public abstract abstractMethod56()V +.end method + +.method public abstract abstractMethod57()V +.end method + +.method public abstract abstractMethod58()V +.end method + +.method public abstract abstractMethod59()V +.end method + +.method public abstract abstractMethod60()V +.end method + +.method public abstract abstractMethod61()V +.end method + +.method public abstract abstractMethod62()V +.end method + +.method public abstract abstractMethod63()V +.end method + +.method public abstract abstractMethod64()V +.end method diff --git a/test/976-conflict-no-methods/smali/Main.smali b/test/976-conflict-no-methods/smali/Main.smali new file mode 100644 index 000000000..7dd11605b --- /dev/null +++ b/test/976-conflict-no-methods/smali/Main.smali @@ -0,0 +1,358 @@ +# /* +# * Copyright (C) 2016 The Android Open Source Project +# * +# * Licensed under the Apache License, Version 2.0 (the "License"); +# * you may not use this file except in compliance with the License. +# * You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# * See the License for the specific language governing permissions and +# * limitations under the License. +# */ +# +.class public LMain; +.super Ljava/lang/Object; +.implements LIface; + +.method public constructor ()V + .registers 1 + invoke-direct {p0}, Ljava/lang/Object;->()V + return-void +.end method + +.method public static main([Ljava/lang/String;)V + .locals 2 + sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream; + const-string v1, "Pass" + invoke-virtual {v0, v1}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V + return-void +.end method + +.method public abstractMethod0()V + .locals 0 + return-void +.end method + +.method public abstractMethod1()V + .locals 0 + return-void +.end method + +.method public abstractMethod2()V + .locals 0 + return-void +.end method + +.method public abstractMethod3()V + .locals 0 + return-void +.end method + +.method public abstractMethod4()V + .locals 0 + return-void +.end method + +.method public abstractMethod5()V + .locals 0 + return-void +.end method + +.method public abstractMethod6()V + .locals 0 + return-void +.end method + +.method public abstractMethod7()V + .locals 0 + return-void +.end method + +.method public abstractMethod8()V + .locals 0 + return-void +.end method + +.method public abstractMethod9()V + .locals 0 + return-void +.end method + +.method public abstractMethod10()V + .locals 0 + return-void +.end method + +.method public abstractMethod11()V + .locals 0 + return-void +.end method + +.method public abstractMethod12()V + .locals 0 + return-void +.end method + +.method public abstractMethod13()V + .locals 0 + return-void +.end method + +.method public abstractMethod14()V + .locals 0 + return-void +.end method + +.method public abstractMethod15()V + .locals 0 + return-void +.end method + +.method public abstractMethod16()V + .locals 0 + return-void +.end method + +.method public abstractMethod17()V + .locals 0 + return-void +.end method + +.method public abstractMethod18()V + .locals 0 + 
return-void +.end method + +.method public abstractMethod19()V + .locals 0 + return-void +.end method + +.method public abstractMethod20()V + .locals 0 + return-void +.end method + +.method public abstractMethod21()V + .locals 0 + return-void +.end method + +.method public abstractMethod22()V + .locals 0 + return-void +.end method + +.method public abstractMethod23()V + .locals 0 + return-void +.end method + +.method public abstractMethod24()V + .locals 0 + return-void +.end method + +.method public abstractMethod25()V + .locals 0 + return-void +.end method + +.method public abstractMethod26()V + .locals 0 + return-void +.end method + +.method public abstractMethod27()V + .locals 0 + return-void +.end method + +.method public abstractMethod28()V + .locals 0 + return-void +.end method + +.method public abstractMethod29()V + .locals 0 + return-void +.end method + +.method public abstractMethod30()V + .locals 0 + return-void +.end method + +.method public abstractMethod31()V + .locals 0 + return-void +.end method + +.method public abstractMethod32()V + .locals 0 + return-void +.end method + +.method public abstractMethod33()V + .locals 0 + return-void +.end method + +.method public abstractMethod34()V + .locals 0 + return-void +.end method + +.method public abstractMethod35()V + .locals 0 + return-void +.end method + +.method public abstractMethod36()V + .locals 0 + return-void +.end method + +.method public abstractMethod37()V + .locals 0 + return-void +.end method + +.method public abstractMethod38()V + .locals 0 + return-void +.end method + +.method public abstractMethod39()V + .locals 0 + return-void +.end method + +.method public abstractMethod40()V + .locals 0 + return-void +.end method + +.method public abstractMethod41()V + .locals 0 + return-void +.end method + +.method public abstractMethod42()V + .locals 0 + return-void +.end method + +.method public abstractMethod43()V + .locals 0 + return-void +.end method + +.method public abstractMethod44()V + .locals 0 
+ return-void +.end method + +.method public abstractMethod45()V + .locals 0 + return-void +.end method + +.method public abstractMethod46()V + .locals 0 + return-void +.end method + +.method public abstractMethod47()V + .locals 0 + return-void +.end method + +.method public abstractMethod48()V + .locals 0 + return-void +.end method + +.method public abstractMethod49()V + .locals 0 + return-void +.end method + +.method public abstractMethod50()V + .locals 0 + return-void +.end method + +.method public abstractMethod51()V + .locals 0 + return-void +.end method + +.method public abstractMethod52()V + .locals 0 + return-void +.end method + +.method public abstractMethod53()V + .locals 0 + return-void +.end method + +.method public abstractMethod54()V + .locals 0 + return-void +.end method + +.method public abstractMethod55()V + .locals 0 + return-void +.end method + +.method public abstractMethod56()V + .locals 0 + return-void +.end method + +.method public abstractMethod57()V + .locals 0 + return-void +.end method + +.method public abstractMethod58()V + .locals 0 + return-void +.end method + +.method public abstractMethod59()V + .locals 0 + return-void +.end method + +.method public abstractMethod60()V + .locals 0 + return-void +.end method + +.method public abstractMethod61()V + .locals 0 + return-void +.end method + +.method public abstractMethod62()V + .locals 0 + return-void +.end method + +.method public abstractMethod63()V + .locals 0 + return-void +.end method + +.method public abstractMethod64()V + .locals 0 + return-void +.end method diff --git a/test/976-conflict-no-methods/smali/NoMethods.smali b/test/976-conflict-no-methods/smali/NoMethods.smali new file mode 100644 index 000000000..787e34a42 --- /dev/null +++ b/test/976-conflict-no-methods/smali/NoMethods.smali @@ -0,0 +1,19 @@ +# /* +# * Copyright (C) 2016 The Android Open Source Project +# * +# * Licensed under the Apache License, Version 2.0 (the "License"); +# * you may not use this file except in 
compliance with the License. +# * You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. +# */ +# + +.class public LNoMethods; +.super LMain; diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk index e547c72c0..97204d34c 100644 --- a/test/Android.libarttest.mk +++ b/test/Android.libarttest.mk @@ -34,6 +34,7 @@ LIBARTTEST_COMMON_SRC_FILES := \ 137-cfi/cfi.cc \ 139-register-natives/regnative.cc \ 141-class-unload/jni_unload.cc \ + 148-multithread-gc-annotations/gc_coverage.cc \ 454-get-vreg/get_vreg_jni.cc \ 457-regs/regs_jni.cc \ 461-get-reference-vreg/get_reference_vreg_jni.cc \ @@ -41,7 +42,10 @@ LIBARTTEST_COMMON_SRC_FILES := \ 497-inlining-and-class-loader/clear_dex_cache.cc \ 543-env-long-ref/env_long_ref.cc \ 566-polymorphic-inlining/polymorphic_inline.cc \ - 570-checker-osr/osr.cc + 570-checker-osr/osr.cc \ + 595-profile-saving/profile-saving.cc \ + 596-app-images/app_images.cc \ + 597-deopt-new-string/deopt.cc ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so @@ -90,7 +94,12 @@ define build-libarttest include $(BUILD_SHARED_LIBRARY) else # host LOCAL_CLANG := $(ART_HOST_CLANG) - LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS) + LOCAL_CFLAGS := $(ART_HOST_CFLAGS) + ifeq ($$(suffix),d) + LOCAL_CFLAGS += $(ART_HOST_DEBUG_CFLAGS) + else + LOCAL_CFLAGS += $(ART_HOST_NON_DEBUG_CFLAGS) + endif LOCAL_ASFLAGS := $(ART_HOST_ASFLAGS) LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread LOCAL_IS_HOST_MODULE := 
true diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index f3cda479d..ee651b549 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -563,6 +563,13 @@ endif TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS := +TEST_ART_BROKEN_NPIC_RUN_TESTS := 596-app-images +ifneq (,$(filter npictest,$(PICTEST_TYPES))) + ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ + ${COMPILER_TYPES},$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + $(IMAGE_TYPES),npictest,$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_NPIC_RUN_TESTS),$(ALL_ADDRESS_SIZES)) +endif + # Tests that should fail in the heap poisoning configuration with the Optimizing compiler. # 055: Exceeds run time limits due to heap poisoning instrumentation (on ARM and ARM64 devices). TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS := \ diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar index d61fc8f8f..aa45d40cb 100755 --- a/test/etc/run-test-jar +++ b/test/etc/run-test-jar @@ -323,11 +323,14 @@ fi if [ "$INTERPRETER" = "y" ]; then INT_OPTS="-Xint" if [ "$VERIFY" = "y" ] ; then + INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=interpret-only" COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=interpret-only" elif [ "$VERIFY" = "s" ]; then + INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-at-runtime" COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-at-runtime" DEX_VERIFY="${DEX_VERIFY} -Xverify:softfail" else # VERIFY = "n" + INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-none" COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none" DEX_VERIFY="${DEX_VERIFY} -Xverify:none" fi @@ -336,18 +339,12 @@ fi if [ "$JIT" = "y" ]; then INT_OPTS="-Xusejit:true" if [ "$VERIFY" = "y" ] ; then + INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-at-runtime" COMPILE_FLAGS="${COMPILE_FLAGS} 
--compiler-filter=verify-at-runtime" - if [ "$PREBUILD" = "n" ]; then - # Make sure that if we have noprebuild we still JIT as DexClassLoader will - # try to compile the dex file. - INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-at-runtime" - fi else + INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-none" COMPILE_FLAGS="${COMPILE_FLAGS} --compiler-filter=verify-none" DEX_VERIFY="${DEX_VERIFY} -Xverify:none" - if [ "$PREBUILD" = "n" ]; then - INT_OPTS="${INT_OPTS} -Xcompiler-option --compiler-filter=verify-none" - fi fi fi @@ -476,11 +473,14 @@ if [ "$HOST" = "n" ]; then LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBRARY_DIRECTORY fi + PUBLIC_LIBS=libart.so:libartd.so + # Create a script with the command. The command can get longer than the longest # allowed adb command and there is no way to get the exit status from a adb shell # command. cmdline="cd $DEX_LOCATION && \ export ANDROID_DATA=$DEX_LOCATION && \ + export ANDROID_ADDITIONAL_PUBLIC_LIBRARIES=$PUBLIC_LIBS && \ export DEX_LOCATION=$DEX_LOCATION && \ export ANDROID_ROOT=$ANDROID_ROOT && \ $mkdir_cmdline && \ diff --git a/test/run-test b/test/run-test index fc57d0914..2710ea32b 100755 --- a/test/run-test +++ b/test/run-test @@ -122,10 +122,12 @@ never_clean="no" have_dex2oat="yes" have_patchoat="yes" have_image="yes" -image_suffix="" pic_image_suffix="" multi_image_suffix="" android_root="/system" +# By default we will use optimizing. 
+image_args="" +image_suffix="-optimizing" while true; do if [ "x$1" = "x--host" ]; then @@ -148,6 +150,7 @@ while true; do elif [ "x$1" = "x--jvm" ]; then target_mode="no" runtime="jvm" + image_args="" prebuild_mode="no" NEED_DEX="false" USE_JACK="false" @@ -244,22 +247,22 @@ while true; do run_args="${run_args} --zygote" shift elif [ "x$1" = "x--interpreter" ]; then - run_args="${run_args} --interpreter --runtime-option -XOatFileManagerCompilerFilter:verify-at-runtime" + run_args="${run_args} --interpreter" image_suffix="-interpreter" shift elif [ "x$1" = "x--jit" ]; then - run_args="${run_args} --jit --runtime-option -XOatFileManagerCompilerFilter:verify-at-runtime" + image_args="--jit" image_suffix="-jit" shift elif [ "x$1" = "x--optimizing" ]; then - run_args="${run_args} -Xcompiler-option --compiler-backend=Optimizing" + image_args="-Xcompiler-option --compiler-backend=Optimizing" image_suffix="-optimizing" shift elif [ "x$1" = "x--no-verify" ]; then - run_args="${run_args} --no-verify --runtime-option -XOatFileManagerCompilerFilter:verify-none" + run_args="${run_args} --no-verify" shift elif [ "x$1" = "x--verify-soft-fail" ]; then - run_args="${run_args} --verify-soft-fail --runtime-option -XOatFileManagerCompilerFilter:verify-at-runtime" + image_args="--verify-soft-fail" image_suffix="-interp-ac" shift elif [ "x$1" = "x--no-optimize" ]; then @@ -348,6 +351,7 @@ while true; do fi done +run_args="${run_args} ${image_args}" # Allocate file descriptor real_stderr and redirect it to the shell's error # output (fd 2). 
if [ ${BASH_VERSINFO[1]} -ge 4 ] && [ ${BASH_VERSINFO[2]} -ge 1 ]; then @@ -467,7 +471,7 @@ elif [ "$runtime" = "art" ]; then run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib${suffix64}" else guess_target_arch_name - run_args="${run_args} --runtime-option -Djava.library.path=/data/art-test/${target_arch_name}" + run_args="${run_args} --runtime-option -Djava.library.path=/data/art-test/${target_arch_name}:/system/lib${suffix64}" run_args="${run_args} --boot /data/art-test/core${image_suffix}${pic_image_suffix}${multi_image_suffix}.art" fi if [ "$relocate" = "yes" ]; then diff --git a/tools/ahat/test-dump/Main.java b/tools/ahat/test-dump/Main.java index d61a98da5..3936f296d 100644 --- a/tools/ahat/test-dump/Main.java +++ b/tools/ahat/test-dump/Main.java @@ -50,7 +50,8 @@ public class Main { bigArray[i] = (byte)((i*i) & 0xFF); } - NativeAllocationRegistry registry = new NativeAllocationRegistry(0x12345, 42); + NativeAllocationRegistry registry = new NativeAllocationRegistry( + Main.class.getClassLoader(), 0x12345, 42); registry.registerNativeAllocation(anObject, 0xABCDABCD); } } diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh index 2eb52bcad..304c2a939 100755 --- a/tools/buildbot-build.sh +++ b/tools/buildbot-build.sh @@ -46,9 +46,14 @@ while true; do done if [[ $mode == "host" ]]; then - make_command="make $j_arg $showcommands build-art-host-tests $common_targets ${out_dir}/host/linux-x86/lib/libjavacoretests.so ${out_dir}/host/linux-x86/lib64/libjavacoretests.so" + make_command="make $j_arg $showcommands build-art-host-tests $common_targets" + make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so " + make_command+=" ${out_dir}/host/linux-x86/lib64/libjavacoretests.so" elif [[ $mode == "target" ]]; then - make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh ${out_dir}/host/linux-x86/bin/adb libstdc++" + make_command="make 
$j_arg $showcommands build-art-target-tests $common_targets" + make_command+=" libjavacrypto libjavacoretests linker toybox toolbox sh" + make_command+=" ${out_dir}/host/linux-x86/bin/adb libstdc++ " + make_command+=" ${out_dir}/target/product/${TARGET_PRODUCT}/system/etc/public.libraries.txt" fi echo "Executing $make_command" diff --git a/tools/dmtracedump/tracedump.cc b/tools/dmtracedump/tracedump.cc index f70e2c220..3afee6fdc 100644 --- a/tools/dmtracedump/tracedump.cc +++ b/tools/dmtracedump/tracedump.cc @@ -512,10 +512,10 @@ int32_t compareUniqueExclusive(const void* a, const void* b) { void freeDataKeys(DataKeys* pKeys) { if (pKeys == nullptr) return; - free(pKeys->fileData); - free(pKeys->threads); - free(pKeys->methods); - free(pKeys); + delete[] pKeys->fileData; + delete[] pKeys->threads; + delete[] pKeys->methods; + delete pKeys; } /* @@ -822,8 +822,8 @@ void sortMethodList(DataKeys* pKeys) { DataKeys* parseKeys(FILE* fp, int32_t verbose) { int64_t offset; DataKeys* pKeys = new DataKeys(); - memset(pKeys, 0, sizeof(DataKeys)); if (pKeys == nullptr) return nullptr; + memset(pKeys, 0, sizeof(DataKeys)); /* * We load the entire file into memory. We do this, rather than memory- @@ -865,9 +865,13 @@ DataKeys* parseKeys(FILE* fp, int32_t verbose) { return nullptr; } - /* Reduce our allocation now that we know where the end of the key section is. */ - pKeys->fileData = reinterpret_cast(realloc(pKeys->fileData, offset)); - pKeys->fileLen = offset; + /* + * Although it is tempting to reduce our allocation now that we know where the + * end of the key section is, there is a pitfall. The method names and + * signatures in the method list contain pointers into the fileData area. + * Realloc or free will result in corruption. + */ + /* Leave fp pointing to the beginning of the data section. 
*/ fseek(fp, offset, SEEK_SET); @@ -2607,7 +2611,7 @@ int32_t main(int32_t argc, char** argv) { if (gOptions.graphFileName != nullptr) { createInclusiveProfileGraphNew(dataKeys); } - free(methods); + delete[] methods; } freeDataKeys(dataKeys); diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt index 38b6ea60f..bdcf86d85 100644 --- a/tools/libcore_failures.txt +++ b/tools/libcore_failures.txt @@ -243,51 +243,14 @@ "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"] }, { - description: "libnativehelper_compat_libc++ loading issue", - result: EXEC_FAILED, - modes: [device], - names: ["dalvik.system.JniTest#testGetSuperclass", - "dalvik.system.JniTest#testPassingBooleans", - "dalvik.system.JniTest#testPassingBytes", - "dalvik.system.JniTest#testPassingChars", - "dalvik.system.JniTest#testPassingClass", - "dalvik.system.JniTest#testPassingDoubles", - "dalvik.system.JniTest#testPassingFloats", - "dalvik.system.JniTest#testPassingInts", - "dalvik.system.JniTest#testPassingLongs", - "dalvik.system.JniTest#testPassingObjectReferences", - "dalvik.system.JniTest#testPassingShorts", - "dalvik.system.JniTest#testPassingThis", - "libcore.util.NativeAllocationRegistryTest#testBadSize", - "libcore.util.NativeAllocationRegistryTest#testEarlyFree", - "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndNoSharedRegistry", - "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndSharedRegistry", - "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndNoSharedRegistry", - "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndSharedRegistry", - "libcore.util.NativeAllocationRegistryTest#testNullArguments"] -}, -{ - description: "libnativehelper_compat_libc++.so not found by dlopen on ARM64", + description: "Only work with --mode=activity", result: EXEC_FAILED, - modes: [device], - bug: 28082914, - names: 
["libcore.java.lang.ThreadTest#testContextClassLoaderIsInherited", - "libcore.java.lang.ThreadTest#testContextClassLoaderIsNotNull", - "libcore.java.lang.ThreadTest#testGetAllStackTracesIncludesAllGroups", - "libcore.java.lang.ThreadTest#testGetStackTrace", - "libcore.java.lang.ThreadTest#testJavaContextClassLoader", - "libcore.java.lang.ThreadTest#testLeakingStartedThreads", - "libcore.java.lang.ThreadTest#testLeakingUnstartedThreads", - "libcore.java.lang.ThreadTest#testNativeThreadNames", - "libcore.java.lang.ThreadTest#testThreadInterrupted", - "libcore.java.lang.ThreadTest#testThreadSleep", - "libcore.java.lang.ThreadTest#testThreadSleepIllegalArguments", - "libcore.java.lang.ThreadTest#testThreadWakeup"] + names: [ "libcore.java.io.FileTest#testJavaIoTmpdirMutable" ] }, { - description: "Only work with --mode=activity", + description: "Investigate whether the test is making a wrong assumption with the newly enforced classpath.", result: EXEC_FAILED, - names: [ "libcore.java.io.FileTest#testJavaIoTmpdirMutable" ] + names: ["dalvik.system.DexClassLoaderTest#testDexThenPathClassLoader"] }, { description: "Made for extending, shouldn't be run", diff --git a/tools/public.libraries.buildbot.txt b/tools/public.libraries.buildbot.txt new file mode 100644 index 000000000..4b01796a0 --- /dev/null +++ b/tools/public.libraries.buildbot.txt @@ -0,0 +1,8 @@ +libart.so +libartd.so +libbacktrace.so +libc.so +libc++.so +libdl.so +libm.so +libnativehelper.so