From 0866f4ed6338faa4a193b7e819fc7cd72bd7b0ae Mon Sep 17 00:00:00 2001
From: Andreas Gampe <agampe@google.com>
Date: Mon, 22 Feb 2016 10:03:12 -0800
Subject: [PATCH] ART: Add unstarted-runtime functions

Add more functions to allow compile-time initialization of code.

Bug: 27248115
Change-Id: Iaf8d92deb73547ccd31c0d6dde68da3bc14c3985
---
 runtime/atomic.cc                                  |   4 +-
 runtime/atomic.h                                   |  23 ++-
 runtime/base/mutex.h                               |   2 +-
 runtime/interpreter/unstarted_runtime.cc           | 188 +++++++++++++++++++++
 runtime/interpreter/unstarted_runtime_list.h       |  11 +-
 runtime/mirror/abstract_method.cc                  |  25 ++-
 runtime/mirror/abstract_method.h                   |   2 +
 runtime/mirror/class.cc                            |  84 +++++++++
 runtime/mirror/class.h                             |   8 +
 runtime/mirror/method.cc                           |   6 +-
 runtime/mirror/method.h                            |   1 +
 runtime/native/java_lang_Class.cc                  |  69 +--------
 .../java_util_concurrent_atomic_AtomicLong.cc      |   3 +-
 13 files changed, 337 insertions(+), 89 deletions(-)

diff --git a/runtime/atomic.cc b/runtime/atomic.cc
index e766a8d77..d5ae570c3 100644
--- a/runtime/atomic.cc
+++ b/runtime/atomic.cc
@@ -28,7 +28,7 @@ Mutex* QuasiAtomic::GetSwapMutex(const volatile int64_t* addr) {
 }
 
 void QuasiAtomic::Startup() {
-  if (kNeedSwapMutexes) {
+  if (NeedSwapMutexes(kRuntimeISA)) {
     gSwapMutexes = new std::vector<Mutex*>;
     for (size_t i = 0; i < kSwapMutexCount; ++i) {
       gSwapMutexes->push_back(new Mutex("QuasiAtomic stripe", kSwapMutexesLock));
@@ -37,7 +37,7 @@ void QuasiAtomic::Startup() {
 }
 
 void QuasiAtomic::Shutdown() {
-  if (kNeedSwapMutexes) {
+  if (NeedSwapMutexes(kRuntimeISA)) {
     STLDeleteElements(gSwapMutexes);
     delete gSwapMutexes;
   }
diff --git a/runtime/atomic.h b/runtime/atomic.h
index d4a7f37bc..e2a725978 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -22,6 +22,7 @@
 #include <limits>
 #include <vector>
 
+#include "arch/instruction_set.h"
 #include "base/logging.h"
 #include "base/macros.h"
 
@@ -44,14 +45,10 @@ class Mutex;
 // quasiatomic operations that are performed on partially-overlapping
 // memory.
 class QuasiAtomic {
-#if defined(__mips__) && !defined(__LP64__)
-  static constexpr bool kNeedSwapMutexes = true;
-#elif defined(__mips__) && defined(__LP64__)
-  // TODO - mips64 still need this for Cas64 ???
-  static constexpr bool kNeedSwapMutexes = true;
-#else
-  static constexpr bool kNeedSwapMutexes = false;
-#endif
+  static constexpr bool NeedSwapMutexes(InstructionSet isa) {
+    // TODO - mips64 still need this for Cas64 ???
+    return (isa == kMips) || (isa == kMips64);
+  }
 
  public:
   static void Startup();
@@ -60,7 +57,7 @@ class QuasiAtomic {
 
   // Reads the 64-bit value at "addr" without tearing.
   static int64_t Read64(volatile const int64_t* addr) {
-    if (!kNeedSwapMutexes) {
+    if (!NeedSwapMutexes(kRuntimeISA)) {
       int64_t value;
 #if defined(__LP64__)
       value = *addr;
@@ -96,7 +93,7 @@ class QuasiAtomic {
 
   // Writes to the 64-bit value at "addr" without tearing.
   static void Write64(volatile int64_t* addr, int64_t value) {
-    if (!kNeedSwapMutexes) {
+    if (!NeedSwapMutexes(kRuntimeISA)) {
 #if defined(__LP64__)
       *addr = value;
 #else
@@ -142,7 +139,7 @@ class QuasiAtomic {
   // at some point during the execution of Cas64, *addr was not equal to
   // old_value.
   static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
-    if (!kNeedSwapMutexes) {
+    if (!NeedSwapMutexes(kRuntimeISA)) {
       return __sync_bool_compare_and_swap(addr, old_value, new_value);
     } else {
       return SwapMutexCas64(old_value, new_value, addr);
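// A minimal standalone sketch (not ART code; enum values illustrative) of the
// pattern the atomic.h hunks above apply: a preprocessor-selected constant is
// replaced by a constexpr predicate over an InstructionSet value, so the same
// question can be answered for a cross-compilation target ISA, not just the
// ISA the binary itself was built for.

enum class InstructionSet { kArm, kArm64, kMips, kMips64, kX86, kX86_64 };

constexpr bool NeedSwapMutexes(InstructionSet isa) {
  // MIPS historically lacked a dependable 64-bit CAS, so wide atomics are
  // emulated with striped mutexes there.
  return isa == InstructionSet::kMips || isa == InstructionSet::kMips64;
}

static_assert(NeedSwapMutexes(InstructionSet::kMips), "mips falls back to mutexes");
static_assert(!NeedSwapMutexes(InstructionSet::kArm64), "arm64 has native 64-bit CAS");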
@@ -150,8 +147,8 @@ class QuasiAtomic {
   }
 
   // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
-  static bool LongAtomicsUseMutexes() {
-    return kNeedSwapMutexes;
+  static bool LongAtomicsUseMutexes(InstructionSet isa) {
+    return NeedSwapMutexes(isa);
   }
 
   static void ThreadFenceAcquire() {
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e72f2a2e7..293451c4b 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -66,8 +66,8 @@ enum LockLevel {
   kRosAllocGlobalLock,
   kRosAllocBracketLock,
   kRosAllocBulkFreeLock,
-  kTransactionLogLock,
   kMarkSweepMarkStackLock,
+  kTransactionLogLock,
   kJniWeakGlobalsLock,
   kReferenceQueueSoftReferencesLock,
   kReferenceQueuePhantomReferencesLock,
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 0e175b85e..b21f1ecc8 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -22,11 +22,13 @@
 #include "ScopedLocalRef.h"
 
 #include "art_method-inl.h"
+#include "base/casts.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "class_linker.h"
 #include "common_throws.h"
 #include "entrypoints/entrypoint_utils-inl.h"
+#include "gc/reference_processor.h"
 #include "handle_scope-inl.h"
 #include "interpreter/interpreter_common.h"
 #include "mirror/array-inl.h"
@@ -261,6 +263,25 @@ void UnstartedRuntime::UnstartedClassGetDeclaredField(
   }
 }
 
+// This is required for Enum(Set) code, as that uses reflection to inspect enum classes.
+void UnstartedRuntime::UnstartedClassGetDeclaredMethod(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // Special managed code cut-out to allow method lookup in an unstarted runtime.
+  mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+  if (klass == nullptr) {
+    ThrowNullPointerExceptionForMethodAccess(shadow_frame->GetMethod(), InvokeType::kVirtual);
+    return;
+  }
+  mirror::String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+  mirror::ObjectArray<mirror::Class>* args =
+      shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<mirror::Class>();
+  if (Runtime::Current()->IsActiveTransaction()) {
+    result->SetL(mirror::Class::GetDeclaredMethodInternal<true>(self, klass, name, args));
+  } else {
+    result->SetL(mirror::Class::GetDeclaredMethodInternal<false>(self, klass, name, args));
+  }
+}
+
 void UnstartedRuntime::UnstartedClassGetEnclosingClass(
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
   StackHandleScope<1> hs(self);
@@ -860,6 +881,155 @@ void UnstartedRuntime::UnstartedStringToCharArray(
   result->SetL(string->ToCharArray(self));
 }
 
+// This allows statically initializing ConcurrentHashMap and SynchronousQueue.
+void UnstartedRuntime::UnstartedReferenceGetReferent(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  mirror::Reference* const ref = down_cast<mirror::Reference*>(
+      shadow_frame->GetVRegReference(arg_offset));
+  if (ref == nullptr) {
+    AbortTransactionOrFail(self, "Reference.getReferent() with null object");
+    return;
+  }
+  mirror::Object* const referent =
+      Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(self, ref);
+  result->SetL(referent);
+}
+
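// A minimal standalone sketch (assumed names, not ART code) of the
// IsActiveTransaction()/template dispatch idiom used by the handlers above and
// below: the runtime flag is hoisted into a template parameter once at the call
// site, so each instantiation statically knows whether to record rollback data.
#include <cstdio>
#include <vector>

static std::vector<int> gRollbackLog;  // stand-in for ART's transaction log

template <bool kTransactionActive>
void SetValue(int* field, int value) {
  if (kTransactionActive) {
    gRollbackLog.push_back(*field);  // remember the old value for rollback
  }
  *field = value;
}

int main() {
  int field = 1;
  const bool active = true;  // imagine Runtime::Current()->IsActiveTransaction()
  if (active) {
    SetValue<true>(&field, 2);
  } else {
    SetValue<false>(&field, 2);
  }
  std::printf("field=%d rollback entries=%zu\n", field, gRollbackLog.size());
  return 0;
}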
+// This allows statically initializing ConcurrentHashMap and SynchronousQueue. We use a somewhat
+// conservative upper bound. We restrict the callers to SynchronousQueue and ConcurrentHashMap,
+// where we can predict the behavior (somewhat).
+// Note: this is required (instead of lazy initialization) as these classes are used in the static
+// initialization of other classes, so will *use* the value.
+void UnstartedRuntime::UnstartedRuntimeAvailableProcessors(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+  std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
+  if (caller == "void java.util.concurrent.SynchronousQueue.<clinit>()") {
+    // SynchronousQueue really only separates between single- and multiprocessor case. Return
+    // 8 as a conservative upper approximation.
+    result->SetI(8);
+  } else if (caller == "void java.util.concurrent.ConcurrentHashMap.<clinit>()") {
+    // ConcurrentHashMap uses it for striding. 8 still seems an OK general value, as it's likely
+    // a good upper bound.
+    // TODO: Consider resetting in the zygote?
+    result->SetI(8);
+  } else {
+    // Not supported.
+    AbortTransactionOrFail(self, "Accessing availableProcessors not allowed");
+  }
+}
+
+// This allows accessing ConcurrentHashMap/SynchronousQueue.
+
+void UnstartedRuntime::UnstartedUnsafeCompareAndSwapLong(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  int64_t expectedValue = shadow_frame->GetVRegLong(arg_offset + 4);
+  int64_t newValue = shadow_frame->GetVRegLong(arg_offset + 6);
+  bool success;
+  // Check whether we're in a transaction, call accordingly.
+  if (Runtime::Current()->IsActiveTransaction()) {
+    success = obj->CasFieldStrongSequentiallyConsistent64<true>(MemberOffset(offset),
+                                                               expectedValue,
+                                                               newValue);
+  } else {
+    success = obj->CasFieldStrongSequentiallyConsistent64<false>(MemberOffset(offset),
+                                                                 expectedValue,
+                                                                 newValue);
+  }
+  result->SetZ(success ? 1 : 0);
+}
+
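// A standalone sketch of the strong, sequentially consistent 64-bit CAS that
// UnstartedUnsafeCompareAndSwapLong forwards to (std::atomic shown for
// illustration; ART goes through its own Object field accessors).
#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<int64_t> field{41};
  int64_t expected = 41;
  // Succeeds only if the field still holds `expected`; on failure, `expected`
  // is updated to the value actually observed.
  bool success = field.compare_exchange_strong(expected, 42,
                                               std::memory_order_seq_cst);
  assert(success && field.load() == 42);
  return 0;
}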
+void UnstartedRuntime::UnstartedUnsafeCompareAndSwapObject(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  mirror::Object* expected_value = shadow_frame->GetVRegReference(arg_offset + 4);
+  mirror::Object* newValue = shadow_frame->GetVRegReference(arg_offset + 5);
+
+  // Must use non transactional mode.
+  if (kUseReadBarrier) {
+    // Need to make sure the reference stored in the field is a to-space one before attempting the
+    // CAS or the CAS could fail incorrectly.
+    mirror::HeapReference<mirror::Object>* field_addr =
+        reinterpret_cast<mirror::HeapReference<mirror::Object>*>(
+            reinterpret_cast<uint8_t*>(obj) + static_cast<size_t>(offset));
+    ReadBarrier::Barrier<mirror::Object, kWithReadBarrier, /* kAlwaysUpdateField */ true>(
+        obj,
+        MemberOffset(offset),
+        field_addr);
+  }
+  bool success;
+  // Check whether we're in a transaction, call accordingly.
+  if (Runtime::Current()->IsActiveTransaction()) {
+    success = obj->CasFieldStrongSequentiallyConsistentObject<true>(MemberOffset(offset),
+                                                                    expected_value,
+                                                                    newValue);
+  } else {
+    success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset),
+                                                                     expected_value,
+                                                                     newValue);
+  }
+  result->SetZ(success ? 1 : 0);
+}
+
+void UnstartedRuntime::UnstartedUnsafeGetObjectVolatile(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  mirror::Object* value = obj->GetFieldObjectVolatile<mirror::Object>(MemberOffset(offset));
+  result->SetL(value);
+}
+
+void UnstartedRuntime::UnstartedUnsafePutOrderedObject(
+    Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+    SHARED_REQUIRES(Locks::mutator_lock_) {
+  // Argument 0 is the Unsafe instance, skip.
+  mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
+  if (obj == nullptr) {
+    AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+    return;
+  }
+  int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
+  mirror::Object* newValue = shadow_frame->GetVRegReference(arg_offset + 4);
+  QuasiAtomic::ThreadFenceRelease();
+  if (Runtime::Current()->IsActiveTransaction()) {
+    obj->SetFieldObject<true>(MemberOffset(offset), newValue);
+  } else {
+    obj->SetFieldObject<false>(MemberOffset(offset), newValue);
+  }
+}
+
+
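// A standalone sketch of the memory ordering UnstartedUnsafePutOrderedObject
// implements above: putOrderedObject is a release-ordered store, expressed in
// ART as a release fence followed by a plain field write.
#include <atomic>

std::atomic<int> gField{0};

void PutOrdered(int value) {
  // Equivalent in effect to gField.store(value, std::memory_order_release):
  // writes sequenced before the fence cannot be reordered past the store.
  std::atomic_thread_fence(std::memory_order_release);
  gField.store(value, std::memory_order_relaxed);
}

int main() {
  PutOrdered(42);
  return gField.load(std::memory_order_relaxed) == 42 ? 0 : 1;
}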
 void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
     Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
     uint32_t* args, JValue* result) {
@@ -906,6 +1076,17 @@ void UnstartedRuntime::UnstartedJNIMathExp(
   result->SetD(exp(value.GetD()));
 }
 
+void UnstartedRuntime::UnstartedJNIAtomicLongVMSupportsCS8(
+    Thread* self ATTRIBUTE_UNUSED,
+    ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED,
+    uint32_t* args ATTRIBUTE_UNUSED,
+    JValue* result) {
+  result->SetZ(QuasiAtomic::LongAtomicsUseMutexes(Runtime::Current()->GetInstructionSet())
+                   ? 0
+                   : 1);
+}
+
 void UnstartedRuntime::UnstartedJNIClassGetNameNative(
     Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
     uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
@@ -913,6 +1094,13 @@ void UnstartedRuntime::UnstartedJNIClassGetNameNative(
   result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
 }
 
+void UnstartedRuntime::UnstartedJNIDoubleLongBitsToDouble(
+    Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
+    mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
+  uint64_t long_input = args[0] | (static_cast<uint64_t>(args[1]) << 32);
+  result->SetD(bit_cast<double>(long_input));
+}
+
 void UnstartedRuntime::UnstartedJNIFloatFloatToRawIntBits(
     Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
     mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index 6d4d71164..29f2197a0 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -24,6 +24,7 @@
   V(ClassClassForName, "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)") \
   V(ClassNewInstance, "java.lang.Object java.lang.Class.newInstance()") \
   V(ClassGetDeclaredField, "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") \
+  V(ClassGetDeclaredMethod, "java.lang.reflect.Method java.lang.Class.getDeclaredMethodInternal(java.lang.String, java.lang.Class[])") \
   V(ClassGetEnclosingClass, "java.lang.Class java.lang.Class.getEnclosingClass()") \
   V(VmClassLoaderFindLoadedClass, "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") \
   V(VoidLookupType, "java.lang.Class java.lang.Void.lookupType()") \
@@ -40,6 +41,8 @@
   V(MemoryPeekInt, "int libcore.io.Memory.peekIntNative(long)") \
   V(MemoryPeekLong, "long libcore.io.Memory.peekLongNative(long)") \
   V(MemoryPeekByteArray, "void libcore.io.Memory.peekByteArray(long, byte[], int, int)") \
+  V(ReferenceGetReferent, "java.lang.Object java.lang.ref.Reference.getReferent()") \
+  V(RuntimeAvailableProcessors, "int java.lang.Runtime.availableProcessors()") \
   V(SecurityGetSecurityPropertiesReader, "java.io.Reader java.security.Security.getSecurityPropertiesReader()") \
   V(StringGetCharsNoCheck, "void java.lang.String.getCharsNoCheck(int, int, char[], int)") \
   V(StringCharAt, "char java.lang.String.charAt(int)") \
@@ -47,7 +50,11 @@
   V(StringFactoryNewStringFromChars, "java.lang.String java.lang.StringFactory.newStringFromChars(int, int, char[])") \
   V(StringFactoryNewStringFromString, "java.lang.String java.lang.StringFactory.newStringFromString(java.lang.String)") \
   V(StringFastSubstring, "java.lang.String java.lang.String.fastSubstring(int, int)") \
-  V(StringToCharArray, "char[] java.lang.String.toCharArray()")
+  V(StringToCharArray, "char[] java.lang.String.toCharArray()") \
+  V(UnsafeCompareAndSwapLong, "boolean sun.misc.Unsafe.compareAndSwapLong(java.lang.Object, long, long, long)") \
+  V(UnsafeCompareAndSwapObject, "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") \
+  V(UnsafeGetObjectVolatile, "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") \
+  V(UnsafePutOrderedObject, "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)")
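// A standalone sketch (illustrative names, not the ART macros) of the X-macro
// technique these V(Name, Descriptor) lists rely on: the same list expands once
// per use site, e.g. into handler declarations and into a descriptor-to-handler
// table, keeping the two in sync by construction.
#include <cstdio>

#define MY_UNSTARTED_LIST(V)  \
  V(Foo, "void Foo()")        \
  V(Bar, "int Bar(int)")

// Expansion 1: declare one handler per entry.
#define DECLARE_HANDLER(Name, Descriptor) static void Unstarted##Name();
MY_UNSTARTED_LIST(DECLARE_HANDLER)
#undef DECLARE_HANDLER

static void UnstartedFoo() { std::puts("handling Foo"); }
static void UnstartedBar() { std::puts("handling Bar"); }

int main() {
  // Expansion 2: a table pairing each descriptor with its handler.
  struct Entry { const char* descriptor; void (*handler)(); };
#define TABLE_ENTRY(Name, Descriptor) {Descriptor, &Unstarted##Name},
  const Entry table[] = { MY_UNSTARTED_LIST(TABLE_ENTRY) };
#undef TABLE_ENTRY
  for (const Entry& e : table) {
    std::printf("%s -> ", e.descriptor);
    e.handler();
  }
  return 0;
}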
 
 // Methods that are native.
 #define UNSTARTED_RUNTIME_JNI_LIST(V) \
@@ -56,7 +63,9 @@
   V(VMStackGetStackClass2, "java.lang.Class dalvik.system.VMStack.getStackClass2()") \
   V(MathLog, "double java.lang.Math.log(double)") \
   V(MathExp, "double java.lang.Math.exp(double)") \
+  V(AtomicLongVMSupportsCS8, "boolean java.util.concurrent.atomic.AtomicLong.VMSupportsCS8()") \
   V(ClassGetNameNative, "java.lang.String java.lang.Class.getNameNative()") \
+  V(DoubleLongBitsToDouble, "double java.lang.Double.longBitsToDouble(long)") \
   V(FloatFloatToRawIntBits, "int java.lang.Float.floatToRawIntBits(float)") \
   V(FloatIntBitsToFloat, "float java.lang.Float.intBitsToFloat(int)") \
   V(ObjectInternalClone, "java.lang.Object java.lang.Object.internalClone()") \
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 91a98707c..5a07deee5 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -21,25 +21,36 @@
 namespace art {
 namespace mirror {
 
+template <bool kTransactionActive>
 bool AbstractMethod::CreateFromArtMethod(ArtMethod* method) {
-  auto* interface_method = method->GetInterfaceMethodIfProxy(sizeof(void*));
-  SetArtMethod(method);
-  SetFieldObject<false>(DeclaringClassOffset(), method->GetDeclaringClass());
-  SetFieldObject<false>(
+  auto* interface_method = method->GetInterfaceMethodIfProxy(
+      kTransactionActive
+          ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
+          : sizeof(void*));
+  SetArtMethod<kTransactionActive>(method);
+  SetFieldObject<kTransactionActive>(DeclaringClassOffset(), method->GetDeclaringClass());
+  SetFieldObject<kTransactionActive>(
       DeclaringClassOfOverriddenMethodOffset(), interface_method->GetDeclaringClass());
-  SetField32<false>(AccessFlagsOffset(), method->GetAccessFlags());
-  SetField32<false>(DexMethodIndexOffset(), method->GetDexMethodIndex());
+  SetField32<kTransactionActive>(AccessFlagsOffset(), method->GetAccessFlags());
+  SetField32<kTransactionActive>(DexMethodIndexOffset(), method->GetDexMethodIndex());
   return true;
 }
 
+template bool AbstractMethod::CreateFromArtMethod<false>(ArtMethod* method);
+template bool AbstractMethod::CreateFromArtMethod<true>(ArtMethod* method);
+
 ArtMethod* AbstractMethod::GetArtMethod() {
   return reinterpret_cast<ArtMethod*>(GetField64(ArtMethodOffset()));
 }
 
+template <bool kTransactionActive>
 void AbstractMethod::SetArtMethod(ArtMethod* method) {
-  SetField64<false>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
+  SetField64<kTransactionActive>(ArtMethodOffset(), reinterpret_cast<uint64_t>(method));
 }
 
+template void AbstractMethod::SetArtMethod<false>(ArtMethod* method);
+template void AbstractMethod::SetArtMethod<true>(ArtMethod* method);
+
 mirror::Class* AbstractMethod::GetDeclaringClass() {
   return GetFieldObject<mirror::Class>(DeclaringClassOffset());
 }
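// A standalone sketch (illustrative names) of the explicit-instantiation
// pattern used in abstract_method.cc above: the template body lives in the .cc
// file, and the two instantiations callers may need are emitted there
// explicitly, so the header only carries the declaration.
#include <iostream>

template <bool kVerbose>
void Configure(int value) {
  if (kVerbose) {
    std::cout << "configuring with " << value << "\n";
  }
}

// Both variants are compiled into this translation unit even if no local
// caller uses them; other files can link against them.
template void Configure<true>(int);
template void Configure<false>(int);

int main() {
  Configure<true>(42);
  return 0;
}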
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index dc084be06..a39f94d39 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -34,11 +34,13 @@ namespace mirror {
 class MANAGED AbstractMethod : public AccessibleObject {
  public:
   // Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
+  template <bool kTransactionActive = false>
   bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
   // Only used by the image writer.
+  template <bool kTransactionActive = false>
   void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
 
   mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 9190e4414..7900eac4e 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -1054,5 +1054,89 @@ uint32_t Class::FindTypeIndexInOtherDexFile(const DexFile& dex_file) {
   return (type_id == nullptr) ? DexFile::kDexNoIndex : dex_file.GetIndexForTypeId(*type_id);
 }
 
+template <bool kTransactionActive>
+mirror::Method* Class::GetDeclaredMethodInternal(Thread* self,
+                                                 mirror::Class* klass,
+                                                 mirror::String* name,
+                                                 mirror::ObjectArray<mirror::Class>* args) {
+  // Covariant return types permit the class to define multiple
+  // methods with the same name and parameter types. Prefer to
+  // return a non-synthetic method in such situations. We may
+  // still return a synthetic method to handle situations like
+  // escalated visibility. We never return miranda methods that
+  // were synthesized by the runtime.
+  constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
+  StackHandleScope<3> hs(self);
+  auto h_method_name = hs.NewHandle(name);
+  if (UNLIKELY(h_method_name.Get() == nullptr)) {
+    ThrowNullPointerException("name == null");
+    return nullptr;
+  }
+  auto h_args = hs.NewHandle(args);
+  Handle<mirror::Class> h_klass = hs.NewHandle(klass);
+  ArtMethod* result = nullptr;
+  const size_t pointer_size = kTransactionActive
+                                  ? Runtime::Current()->GetClassLinker()->GetImagePointerSize()
+                                  : sizeof(void*);
+  for (auto& m : h_klass->GetDeclaredVirtualMethods(pointer_size)) {
+    auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+    // May cause thread suspension.
+    mirror::String* np_name = np_method->GetNameAsString(self);
+    if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+      if (UNLIKELY(self->IsExceptionPending())) {
+        return nullptr;
+      }
+      continue;
+    }
+    auto modifiers = m.GetAccessFlags();
+    if ((modifiers & kSkipModifiers) == 0) {
+      return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+    }
+    if ((modifiers & kAccMiranda) == 0) {
+      result = &m;  // Remember as potential result if it's not a miranda method.
+    }
+  }
+  if (result == nullptr) {
+    for (auto& m : h_klass->GetDirectMethods(pointer_size)) {
+      auto modifiers = m.GetAccessFlags();
+      if ((modifiers & kAccConstructor) != 0) {
+        continue;
+      }
+      auto* np_method = m.GetInterfaceMethodIfProxy(pointer_size);
+      // May cause thread suspension.
+      mirror::String* np_name = np_method->GetNameAsString(self);
+      if (np_name == nullptr) {
+        self->AssertPendingException();
+        return nullptr;
+      }
+      if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
+        if (UNLIKELY(self->IsExceptionPending())) {
+          return nullptr;
+        }
+        continue;
+      }
+      if ((modifiers & kSkipModifiers) == 0) {
+        return mirror::Method::CreateFromArtMethod<kTransactionActive>(self, &m);
+      }
+      // Direct methods cannot be miranda methods, so this potential result must be synthetic.
+      result = &m;
+    }
+  }
+  return result != nullptr
+      ? mirror::Method::CreateFromArtMethod<kTransactionActive>(self, result)
+      : nullptr;
+}
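// A standalone sketch (simplified types) of the selection rule the function
// above implements: among candidates with matching name and signature, a
// non-synthetic method wins immediately, otherwise a synthetic match is kept
// as a fallback.
#include <cstdio>
#include <vector>

struct MethodInfo {
  const char* name;
  bool synthetic;
};

const MethodInfo* PickDeclaredMethod(const std::vector<MethodInfo>& candidates) {
  const MethodInfo* fallback = nullptr;
  for (const MethodInfo& m : candidates) {
    if (!m.synthetic) {
      return &m;  // non-synthetic wins immediately
    }
    fallback = &m;  // remember a synthetic match in case nothing better shows up
  }
  return fallback;
}

int main() {
  std::vector<MethodInfo> candidates = {{"foo", true}, {"foo", false}};
  const MethodInfo* chosen = PickDeclaredMethod(candidates);
  std::printf("chose %s (synthetic=%d)\n", chosen->name, chosen->synthetic);
  return 0;
}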
+
+template
+mirror::Method* Class::GetDeclaredMethodInternal<false>(Thread* self,
+                                                        mirror::Class* klass,
+                                                        mirror::String* name,
+                                                        mirror::ObjectArray<mirror::Class>* args);
+template
+mirror::Method* Class::GetDeclaredMethodInternal<true>(Thread* self,
+                                                       mirror::Class* klass,
+                                                       mirror::String* name,
+                                                       mirror::ObjectArray<mirror::Class>* args);
+
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 6e3463c25..7082c886a 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -55,6 +55,7 @@ class ClassLoader;
 class Constructor;
 class DexCache;
 class IfTable;
+class Method;
 
 // C++ mirror of java.lang.Class
 class MANAGED Class FINAL : public Object {
@@ -759,6 +760,13 @@ class MANAGED Class FINAL : public Object {
                                                       size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  template <bool kTransactionActive>
+  static Method* GetDeclaredMethodInternal(Thread* self,
+                                           mirror::Class* klass,
+                                           mirror::String* name,
+                                           mirror::ObjectArray<mirror::Class>* args)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index 85c52e95d..97973e627 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -51,15 +51,19 @@ void Method::ResetArrayClass() {
   array_class_ = GcRoot<Class>(nullptr);
 }
 
+template <bool kTransactionActive>
 Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
   DCHECK(!method->IsConstructor()) << PrettyMethod(method);
   auto* ret = down_cast<Method*>(StaticClass()->AllocObject(self));
   if (LIKELY(ret != nullptr)) {
-    static_cast<AbstractMethod*>(ret)->CreateFromArtMethod(method);
+    static_cast<AbstractMethod*>(ret)->CreateFromArtMethod<kTransactionActive>(method);
   }
   return ret;
 }
 
+template Method* Method::CreateFromArtMethod<false>(Thread* self, ArtMethod* method);
+template Method* Method::CreateFromArtMethod<true>(Thread* self, ArtMethod* method);
+
 void Method::VisitRoots(RootVisitor* visitor) {
   static_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
   array_class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index 0c28e4f58..12a72fe44 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -28,6 +28,7 @@ class Class;
 // C++ mirror of java.lang.reflect.Method.
 class MANAGED Method : public AbstractMethod {
  public:
+  template <bool kTransactionActive = false>
   static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index b5d859b6b..bf24de528 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -371,70 +371,13 @@ static jobjectArray Class_getDeclaredConstructorsInternal(
 
 static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
                                                jobject name, jobjectArray args) {
-  // Covariant return types permit the class to define multiple
-  // methods with the same name and parameter types. Prefer to
-  // return a non-synthetic method in such situations. We may
-  // still return a synthetic method to handle situations like
-  // escalated visibility. We never return miranda methods that
-  // were synthesized by the runtime.
-  constexpr uint32_t kSkipModifiers = kAccMiranda | kAccSynthetic;
   ScopedFastNativeObjectAccess soa(env);
-  StackHandleScope<3> hs(soa.Self());
-  auto h_method_name = hs.NewHandle(soa.Decode<mirror::String*>(name));
-  if (UNLIKELY(h_method_name.Get() == nullptr)) {
-    ThrowNullPointerException("name == null");
-    return nullptr;
-  }
-  auto h_args = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
-  Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
-  ArtMethod* result = nullptr;
-  for (auto& m : h_klass->GetDeclaredVirtualMethods(sizeof(void*))) {
-    auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
-    // May cause thread suspension.
-    mirror::String* np_name = np_method->GetNameAsString(soa.Self());
-    if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
-      if (UNLIKELY(soa.Self()->IsExceptionPending())) {
-        return nullptr;
-      }
-      continue;
-    }
-    auto modifiers = m.GetAccessFlags();
-    if ((modifiers & kSkipModifiers) == 0) {
-      return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
-    }
-    if ((modifiers & kAccMiranda) == 0) {
-      result = &m;  // Remember as potential result if it's not a miranda method.
-    }
-  }
-  if (result == nullptr) {
-    for (auto& m : h_klass->GetDirectMethods(sizeof(void*))) {
-      auto modifiers = m.GetAccessFlags();
-      if ((modifiers & kAccConstructor) != 0) {
-        continue;
-      }
-      auto* np_method = m.GetInterfaceMethodIfProxy(sizeof(void*));
-      // May cause thread suspension.
-      mirror::String* np_name = np_method->GetNameAsString(soa.Self());
-      if (np_name == nullptr) {
-        soa.Self()->AssertPendingException();
-        return nullptr;
-      }
-      if (!np_name->Equals(h_method_name.Get()) || !np_method->EqualParameters(h_args)) {
-        if (UNLIKELY(soa.Self()->IsExceptionPending())) {
-          return nullptr;
-        }
-        continue;
-      }
-      if ((modifiers & kSkipModifiers) == 0) {
-        return soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), &m));
-      }
-      // Direct methods cannot be miranda methods, so this potential result must be synthetic.
-      result = &m;
-    }
-  }
-  return result != nullptr ?
-      soa.AddLocalReference<jobject>(mirror::Method::CreateFromArtMethod(soa.Self(), result)) :
-      nullptr;
+  mirror::Method* result = mirror::Class::GetDeclaredMethodInternal<false>(
+      soa.Self(),
+      DecodeClass(soa, javaThis),
+      soa.Decode<mirror::String*>(name),
+      soa.Decode<mirror::ObjectArray<mirror::Class>*>(args));
+  return soa.AddLocalReference<jobject>(result);
 }
 
 static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaThis,
diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
index 04f0ba0c1..4d2ea6794 100644
--- a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
+++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc
@@ -16,13 +16,14 @@
 
 #include "java_util_concurrent_atomic_AtomicLong.h"
 
+#include "arch/instruction_set.h"
 #include "atomic.h"
 #include "jni_internal.h"
 
 namespace art {
 
 static jboolean AtomicLong_VMSupportsCS8(JNIEnv*, jclass) {
-  return QuasiAtomic::LongAtomicsUseMutexes() ? JNI_FALSE : JNI_TRUE;
+  return QuasiAtomic::LongAtomicsUseMutexes(kRuntimeISA) ? JNI_FALSE : JNI_TRUE;
 }
 
 static JNINativeMethod gMethods[] = {
-- 
2.11.0
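// A standalone sketch of what UnstartedJNIDoubleLongBitsToDouble (added above)
// does with its argument registers: JNI passes the long in two 32-bit vreg
// slots, which are reassembled and type-punned to a double without altering
// any bits (memcpy is the portable stand-in for art::bit_cast).
#include <cstdint>
#include <cstdio>
#include <cstring>

double LongBitsToDouble(uint32_t lo, uint32_t hi) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  // 0x3FF0000000000000 is the IEEE-754 encoding of 1.0.
  std::printf("%f\n", LongBitsToDouble(0x00000000u, 0x3FF00000u));  // prints 1.000000
  return 0;
}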