namespace art {
-extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
- ScopedQuickEntrypointChecks sqec(self);
-
+NO_RETURN static void artDeoptimizeImpl(Thread* self, bool single_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
if (VLOG_IS_ON(deopt)) {
- LOG(INFO) << "Deopting:";
- self->Dump(LOG(INFO));
+ if (single_frame) {
+ // Deopt logging will be in DeoptimizeSingleFrame. It is there to take advantage of the
+ // specialized visitor that will show whether a method is Quick or Shadow.
+ } else {
+ LOG(INFO) << "Deopting:";
+ self->Dump(LOG(INFO));
+ }
}
self->AssertHasDeoptimizationContext();
- self->SetException(Thread::GetDeoptimizationException());
- self->QuickDeliverException();
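+ // The handler is constructed with is_deoptimization == true: it deoptimizes the
+ // stack instead of delivering an exception.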
+ QuickExceptionHandler exception_handler(self, true);
+ if (single_frame) {
+ exception_handler.DeoptimizeSingleFrame();
+ } else {
+ exception_handler.DeoptimizeStack();
+ }
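+ // Pop instrumentation frames for the methods being unwound; the return pc of the
+ // last popped frame is needed below to patch the stack for a partial fragment.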
+ uintptr_t return_pc = exception_handler.UpdateInstrumentationStack();
+ if (exception_handler.IsFullFragmentDone()) {
+ exception_handler.DoLongJump(true);
+ } else {
+ exception_handler.DeoptimizePartialFragmentFixup(return_pc);
+ // We cannot smash the caller-saves, as we need the ArtMethod in a parameter register that would
+ // be caller-saved. This has the downside that we cannot track incorrect register usage down the
+ // line.
+ exception_handler.DoLongJump(false);
+ }
+}
+
+extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ artDeoptimizeImpl(self, false);
}
+// This is called directly from compiled code by an HDeoptimize instruction.
extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
-
- // Deopt logging will be in DeoptimizeSingleFrame. It is there to take advantage of the
- // specialized visitor that will show whether a method is Quick or Shadow.
-
// Before deoptimizing to interpreter, we must push the deoptimization context.
JValue return_value;
return_value.SetJ(0); // we never deoptimize from compiled code with an invoke result.
self->PushDeoptimizationContext(return_value, false, /* from_code */ true, self->GetException());
-
- QuickExceptionHandler exception_handler(self, true);
- exception_handler.DeoptimizeSingleFrame();
- exception_handler.UpdateInstrumentationStack();
- exception_handler.DeoptimizeSingleFrameArchDependentFixup();
- // We cannot smash the caller-saves, as we need the ArtMethod in a parameter register that would
- // be caller-saved. This has the downside that we cannot track incorrect register usage down the
- // line.
- exception_handler.DoLongJump(false);
+ artDeoptimizeImpl(self, true);
}
} // namespace art
JValue tmp_value;
ShadowFrame* deopt_frame = self->PopStackedShadowFrame(
- StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame, false);
+ StackedShadowFrameType::kDeoptimizationShadowFrame, false);
ManagedStack fragment;
DCHECK(!method->IsNative()) << PrettyMethod(method);
JValue result;
if (deopt_frame != nullptr) {
- // Coming from single-frame deopt.
+ // Coming from partial-fragment deopt.
if (kIsDebugBuild) {
// Sanity-check: are the methods as expected? We check that the last shadow frame (the bottom
}
if (VLOG_IS_ON(deopt)) {
- // Print out the stack to verify that it was a single-frame deopt.
+ // Print out the stack to verify that it was a partial-fragment deopt.
LOG(INFO) << "Continue-ing from deopt. Stack is:";
QuickExceptionHandler::DumpFramesWithType(self, true);
}
mirror::Throwable* pending_exception = nullptr;
bool from_code = false;
self->PopDeoptimizationContext(&result, &pending_exception, /* out */ &from_code);
- CHECK(from_code);
// Push a transition back into managed code onto the linked list in thread.
self->PushManagedStackFragment(&fragment);
// Request a stack deoptimization if needed
ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
- if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
+ uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
+ if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller) &&
+ Runtime::Current()->IsDeoptimizeable(caller_pc))) {
// Push the context of the deoptimization stack so we can restore the return value and the
// exception before executing the deoptimized frames.
self->PushDeoptimizationContext(
bool deoptimize = (visitor.caller != nullptr) &&
(interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
- if (deoptimize) {
+ if (deoptimize && Runtime::Current()->IsDeoptimizeable(*return_pc)) {
if (kVerboseInstrumentation) {
LOG(INFO) << StringPrintf("Deoptimizing %s by returning from %s with result %#" PRIx64 " in ",
PrettyMethod(visitor.caller).c_str(),
}
}
-void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) const {
+uintptr_t Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) const {
// Do the pop.
std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack();
CHECK_GT(stack->size(), 0U);
uint32_t dex_pc = DexFile::kDexNoIndex;
MethodUnwindEvent(self, instrumentation_frame.this_object_, method, dex_pc);
}
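+ // Hand back the saved return pc so that callers such as UpdateInstrumentationStack
+ // can use it to patch the stack.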
+ return instrumentation_frame.return_pc_;
}
std::string InstrumentationStackFrame::Dump() const {
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Pops an instrumentation frame from the current thread and generate an unwind event.
- void PopMethodForUnwind(Thread* self, bool is_deoptimization) const
+ // Returns the return pc for the instrumentation frame that's popped.
+ uintptr_t PopMethodForUnwind(Thread* self, bool is_deoptimization) const
SHARED_REQUIRES(Locks::mutator_lock_);
// Call back for configure stubs.
DCHECK(caller == nullptr);
if (count == n) {
caller = m;
+ caller_pc = GetCurrentQuickFramePc();
return false;
}
count++;
const bool include_runtime_and_upcalls_;
size_t count;
ArtMethod* caller;
+ uintptr_t caller_pc;
};
} // namespace art
handler_method_(nullptr),
handler_dex_pc_(0),
clear_exception_(false),
- handler_frame_depth_(kInvalidFrameDepth) {}
+ handler_frame_depth_(kInvalidFrameDepth),
+ full_fragment_done_(false) {}
// Finds catch handler.
class CatchBlockStackVisitor FINAL : public StackVisitor {
single_frame_deopt_(single_frame),
single_frame_done_(false),
single_frame_deopt_method_(nullptr),
- single_frame_deopt_quick_method_header_(nullptr) {
+ single_frame_deopt_quick_method_header_(nullptr),
+ callee_method_(nullptr) {
}
ArtMethod* GetSingleFrameDeoptMethod() const {
return single_frame_deopt_quick_method_header_;
}
+ void FinishStackWalk() SHARED_REQUIRES(Locks::mutator_lock_) {
+ // This is the upcall, or the next full frame in single-frame deopt, or the
+ // code isn't deoptimizeable. We remember the frame and last pc so that we
+ // may long jump to them.
+ exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
+ exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
+ exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
+ if (!stacked_shadow_frame_pushed_) {
+ // In case there is no deoptimized shadow frame for this upcall, we still
+ // need to push a nullptr to the stack since there is always a matching pop after
+ // the long jump.
+ GetThread()->PushStackedShadowFrame(nullptr,
+ StackedShadowFrameType::kDeoptimizationShadowFrame);
+ stacked_shadow_frame_pushed_ = true;
+ }
+ if (GetMethod() == nullptr) {
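+ // A null method means the stack walk reached the upcall, so the full fragment
+ // has been covered.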
+ exception_handler_->SetFullFragmentDone(true);
+ } else {
+ CHECK(callee_method_ != nullptr) << art::PrettyMethod(GetMethod(), false);
+ exception_handler_->SetHandlerQuickArg0(reinterpret_cast<uintptr_t>(callee_method_));
+ }
+ }
+
bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
ArtMethod* method = GetMethod();
if (method == nullptr || single_frame_done_) {
- // This is the upcall (or the next full frame in single-frame deopt), we remember the frame
- // and last pc so that we may long jump to them.
- exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
- exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
- exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
- if (!stacked_shadow_frame_pushed_) {
- // In case there is no deoptimized shadow frame for this upcall, we still
- // need to push a nullptr to the stack since there is always a matching pop after
- // the long jump.
- GetThread()->PushStackedShadowFrame(nullptr,
- StackedShadowFrameType::kDeoptimizationShadowFrame);
- stacked_shadow_frame_pushed_ = true;
- }
+ FinishStackWalk();
return false; // End stack walk.
} else if (method->IsRuntimeMethod()) {
// Ignore callee save method.
// the native method.
// The top method is a runtime method, the native method comes next.
CHECK_EQ(GetFrameDepth(), 1U);
+ callee_method_ = method;
return true;
+ } else if (!single_frame_deopt_ &&
+ !Runtime::Current()->IsDeoptimizeable(GetCurrentQuickFramePc())) {
+ // We hit some code that's not deoptimizeable. However, single-frame deoptimization
+ // triggered from compiled code is always allowed, since HDeoptimize always saves the
+ // full environment.
+ FinishStackWalk();
+ return false; // End stack walk.
} else {
// Check if a shadow frame already exists for debugger's set-local-value purpose.
const size_t frame_id = GetFrameId();
// right before interpreter::EnterInterpreterFromDeoptimize().
stacked_shadow_frame_pushed_ = true;
GetThread()->PushStackedShadowFrame(
- new_frame,
- single_frame_deopt_
- ? StackedShadowFrameType::kSingleFrameDeoptimizationShadowFrame
- : StackedShadowFrameType::kDeoptimizationShadowFrame);
+ new_frame, StackedShadowFrameType::kDeoptimizationShadowFrame);
}
prev_shadow_frame_ = new_frame;
if (single_frame_deopt_ && !IsInInlinedFrame()) {
// Single-frame deopt ends at the first non-inlined frame and needs to store that method.
- exception_handler_->SetHandlerQuickArg0(reinterpret_cast<uintptr_t>(method));
single_frame_done_ = true;
single_frame_deopt_method_ = method;
single_frame_deopt_quick_method_header_ = GetCurrentOatQuickMethodHeader();
}
+ callee_method_ = method;
return true;
}
}
bool single_frame_done_;
ArtMethod* single_frame_deopt_method_;
const OatQuickMethodHeader* single_frame_deopt_quick_method_header_;
+ ArtMethod* callee_method_;
DISALLOW_COPY_AND_ASSIGN(DeoptimizeStackVisitor);
};
+void QuickExceptionHandler::PrepareForLongJumpToInvokeStubOrInterpreterBridge() {
+ if (full_fragment_done_) {
+ // Restore deoptimization exception. When returning from the invoke stub,
+ // ArtMethod::Invoke() will see the special exception to know deoptimization
+ // is needed.
+ self_->SetException(Thread::GetDeoptimizationException());
+ } else {
+ // PC needs to be of the quick-to-interpreter bridge.
+ int32_t offset;
+ #ifdef __LP64__
+ offset = GetThreadOffset<8>(kQuickQuickToInterpreterBridge).Int32Value();
+ #else
+ offset = GetThreadOffset<4>(kQuickQuickToInterpreterBridge).Int32Value();
+ #endif
+ handler_quick_frame_pc_ = *reinterpret_cast<uintptr_t*>(
+ reinterpret_cast<uint8_t*>(self_) + offset);
+ }
+}
+
void QuickExceptionHandler::DeoptimizeStack() {
DCHECK(is_deoptimization_);
if (kDebugExceptionDelivery) {
DeoptimizeStackVisitor visitor(self_, context_, this, false);
visitor.WalkStack(true);
-
- // Restore deoptimization exception
- self_->SetException(Thread::GetDeoptimizationException());
+ PrepareForLongJumpToInvokeStubOrInterpreterBridge();
}
void QuickExceptionHandler::DeoptimizeSingleFrame() {
deopt_method, GetQuickToInterpreterBridge());
}
- // PC needs to be of the quick-to-interpreter bridge.
- int32_t offset;
- #ifdef __LP64__
- offset = GetThreadOffset<8>(kQuickQuickToInterpreterBridge).Int32Value();
- #else
- offset = GetThreadOffset<4>(kQuickQuickToInterpreterBridge).Int32Value();
- #endif
- handler_quick_frame_pc_ = *reinterpret_cast<uintptr_t*>(
- reinterpret_cast<uint8_t*>(self_) + offset);
+ PrepareForLongJumpToInvokeStubOrInterpreterBridge();
}
-void QuickExceptionHandler::DeoptimizeSingleFrameArchDependentFixup() {
- // Architecture-dependent work. This is to get the LR right for x86 and x86-64.
+void QuickExceptionHandler::DeoptimizePartialFragmentFixup(uintptr_t return_pc) {
+ // At this point, the instrumentation stack has been updated. We need to install
+ // the real return pc on the stack, in case the instrumentation stub is stored
+ // there, so that the interpreter bridge code can return to the right place.
+ if (return_pc != 0) {
+ uintptr_t* pc_addr = reinterpret_cast<uintptr_t*>(handler_quick_frame_);
+ CHECK(pc_addr != nullptr);
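+ // Step down one stack slot to where the caller's return pc is stored.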
+ pc_addr--;
+ *pc_addr = return_pc;
+ }
+ // Architecture-dependent work. This is to get the return address right for x86 and x86-64.
if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
// On x86, the return address is on the stack, so just reuse it. Otherwise we would have to
// change how longjump works.
DISALLOW_COPY_AND_ASSIGN(InstrumentationStackVisitor);
};
-void QuickExceptionHandler::UpdateInstrumentationStack() {
+uintptr_t QuickExceptionHandler::UpdateInstrumentationStack() {
+ uintptr_t return_pc = 0;
if (method_tracing_active_) {
InstrumentationStackVisitor visitor(self_, handler_frame_depth_);
visitor.WalkStack(true);
size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
for (size_t i = 0; i < instrumentation_frames_to_pop; ++i) {
- instrumentation->PopMethodForUnwind(self_, is_deoptimization_);
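+ // Keep the return pc of the last frame popped; it is the one returned to the caller.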
+ return_pc = instrumentation->PopMethodForUnwind(self_, is_deoptimization_);
}
}
+ return return_pc;
}
void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) {
// Find the catch handler for the given exception.
void FindCatch(mirror::Throwable* exception) SHARED_REQUIRES(Locks::mutator_lock_);
- // Deoptimize the stack to the upcall. For every compiled frame, we create a "copy"
- // shadow frame that will be executed with the interpreter.
+ // Deoptimize the stack to the upcall, or to some code that's not deoptimizeable.
+ // For every compiled frame, we create a "copy" shadow frame that will be executed
+ // with the interpreter.
void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Deoptimize a single frame. It's directly triggered from compiled code. It
+ // has the following properties:
+ // - It deoptimizes a single frame, which can include multiple inlined frames.
+ // - There is no return result or pending exception at the deoptimization point.
+ // - It always deoptimizes, even if IsDeoptimizeable() returns false for the
+ //   code, since HDeoptimize always saves the full environment. So it overrides
+ //   the result of IsDeoptimizeable().
+ // - It can be either a full-fragment or a partial-fragment deoptimization,
+ //   depending on whether that single frame covers a full or partial fragment.
void DeoptimizeSingleFrame() SHARED_REQUIRES(Locks::mutator_lock_);
- void DeoptimizeSingleFrameArchDependentFixup() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void DeoptimizePartialFragmentFixup(uintptr_t return_pc)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Update the instrumentation stack by removing all methods that will be unwound
// by the exception being thrown.
- void UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return the return pc of the last frame that's unwound.
+ uintptr_t UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_);
// Set up environment before delivering an exception to optimized code.
void SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor)
handler_frame_depth_ = frame_depth;
}
+ bool IsFullFragmentDone() const {
+ return full_fragment_done_;
+ }
+
+ void SetFullFragmentDone(bool full_fragment_done) {
+ full_fragment_done_ = full_fragment_done;
+ }
+
// Walk the stack frames of the given thread, printing out non-runtime methods with their types
- // of frames. Helps to verify that single-frame deopt really only deopted one frame.
+ // of frames. Helps to verify that partial-fragment deopt really works as expected.
static void DumpFramesWithType(Thread* self, bool details = false)
SHARED_REQUIRES(Locks::mutator_lock_);
bool clear_exception_;
// Frame depth of the catch handler or the upcall.
size_t handler_frame_depth_;
+ // Whether the handler successfully walked the full fragment (i.e., was not
+ // stopped by some code that's not deoptimizeable). Even single-frame
+ // deoptimization can set this to true if the fragment contains only one quick frame.
+ bool full_fragment_done_;
+
+ void PrepareForLongJumpToInvokeStubOrInterpreterBridge()
+ SHARED_REQUIRES(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(QuickExceptionHandler);
};
return verify_ == verifier::VerifyMode::kSoftFail;
}
+bool Runtime::IsDeoptimizeable(uintptr_t code) const
+ SHARED_REQUIRES(Locks::mutator_lock_) {
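+ // Anything compiled into the boot image oat file may use optimizations that make it
+ // impossible to deoptimize, so such code is never considered deoptimizeable.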
+ return !heap_->IsInBootImageOatFile(reinterpret_cast<void*>(code));
+}
+
LinearAlloc* Runtime::CreateLinearAlloc() {
// For 64 bit compilers, it needs to be in low 4GB in the case where we are cross compiling for a
// 32 bit target. In this case, we have 32 bit pointers in the dex cache arrays which can't hold
return zygote_no_threads_;
}
+ // Returns whether the code can be deoptimized. Code may be compiled with some
+ // optimization that makes it impossible to deoptimize.
+ bool IsDeoptimizeable(uintptr_t code) const SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
static void InitPlatformSignalHandlers();
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
+#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "object_lock.h"
#include "quick_exception_handler.h"
namespace art {
+extern "C" NO_RETURN void artDeoptimize(Thread* self);
+
bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
if (must_be_present) {
DCHECK(record != nullptr);
- DCHECK_EQ(record->GetType(), type);
} else {
if (record == nullptr || record->GetType() != type) {
return nullptr;
// Get exception from thread.
mirror::Throwable* exception = GetException();
CHECK(exception != nullptr);
- bool is_deoptimization = (exception == GetDeoptimizationException());
- if (!is_deoptimization) {
- // This is a real exception: let the instrumentation know about it.
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- if (instrumentation->HasExceptionCaughtListeners() &&
- IsExceptionThrownByCurrentMethod(exception)) {
- // Instrumentation may cause GC so keep the exception object safe.
- StackHandleScope<1> hs(this);
- HandleWrapper<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
- instrumentation->ExceptionCaughtEvent(this, exception);
- }
- // Does instrumentation need to deoptimize the stack?
- // Note: we do this *after* reporting the exception to instrumentation in case it
- // now requires deoptimization. It may happen if a debugger is attached and requests
- // new events (single-step, breakpoint, ...) when the exception is reported.
- is_deoptimization = Dbg::IsForcedInterpreterNeededForException(this);
- if (is_deoptimization) {
+ if (exception == GetDeoptimizationException()) {
+ artDeoptimize(this);
+ UNREACHABLE();
+ }
+
+ // This is a real exception: let the instrumentation know about it.
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (instrumentation->HasExceptionCaughtListeners() &&
+ IsExceptionThrownByCurrentMethod(exception)) {
+ // Instrumentation may cause GC so keep the exception object safe.
+ StackHandleScope<1> hs(this);
+ HandleWrapper<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
+ instrumentation->ExceptionCaughtEvent(this, exception);
+ }
+ // Does instrumentation need to deoptimize the stack?
+ // Note: we do this *after* reporting the exception to instrumentation in case it
+ // now requires deoptimization. It may happen if a debugger is attached and requests
+ // new events (single-step, breakpoint, ...) when the exception is reported.
+ if (Dbg::IsForcedInterpreterNeededForException(this)) {
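+ // Only deoptimize if the caller's code is actually deoptimizeable; otherwise fall
+ // through and deliver the exception normally.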
+ NthCallerVisitor visitor(this, 0, false);
+ visitor.WalkStack();
+ if (Runtime::Current()->IsDeoptimizeable(visitor.caller_pc)) {
// Save the exception into the deoptimization context so it can be restored
// before entering the interpreter.
PushDeoptimizationContext(
JValue(), /*is_reference */ false, /* from_code */ false, exception);
+ artDeoptimize(this);
+ UNREACHABLE();
}
}
+
// Don't leave exception visible while we try to find the handler, which may cause class
// resolution.
ClearException();
- QuickExceptionHandler exception_handler(this, is_deoptimization);
- if (is_deoptimization) {
- exception_handler.DeoptimizeStack();
- } else {
- exception_handler.FindCatch(exception);
- }
+ QuickExceptionHandler exception_handler(this, false);
+ exception_handler.FindCatch(exception);
exception_handler.UpdateInstrumentationStack();
exception_handler.DoLongJump();
}
mirror::Throwable* pending_exception = nullptr;
bool from_code = false;
PopDeoptimizationContext(result, &pending_exception, &from_code);
- CHECK(!from_code) << "Deoptimizing from code should be done with single frame deoptimization";
SetTopOfStack(nullptr);
SetTopOfShadowStack(shadow_frame);
enum class StackedShadowFrameType {
kShadowFrameUnderConstruction,
kDeoptimizationShadowFrame,
- kSingleFrameDeoptimizationShadowFrame
};
// This should match RosAlloc::kNumThreadLocalSizeBrackets.
--- /dev/null
+JNI_OnLoad called
+Finishing
--- /dev/null
+Test various cases for full/partial-fragment deoptimization.
--- /dev/null
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+
+class DummyObject {
+ public static boolean sHashCodeInvoked = false;
+ private int i;
+
+ public DummyObject(int i) {
+ this.i = i;
+ }
+
+ public boolean equals(Object obj) {
+ return (obj instanceof DummyObject) && (i == ((DummyObject)obj).i);
+ }
+
+ public int hashCode() {
+ sHashCodeInvoked = true;
+ Main.assertIsManaged();
+ Main.deoptimizeAll();
+ Main.assertIsInterpreted();
+ Main.assertCallerIsManaged(); // Caller is framework code (HashMap).
+ return i % 64;
+ }
+}
+
+public class Main {
+ static boolean sFlag = false;
+
+ public static native void deoptimizeAll();
+ public static native void undeoptimizeAll();
+ public static native void assertIsInterpreted();
+ public static native void assertIsManaged();
+ public static native void assertCallerIsInterpreted();
+ public static native void assertCallerIsManaged();
+
+ public static void execute(Runnable runnable) throws Exception {
+ Thread t = new Thread(runnable);
+ t.start();
+ t.join();
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
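+ // map.put()/get() below invoke DummyObject.hashCode() from framework code
+ // (HashMap), triggering deoptimization from within a compiled frame.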
+ final HashMap<DummyObject, Long> map = new HashMap<DummyObject, Long>();
+
+ // Single-frame deoptimization that covers partial fragment.
+ execute(new Runnable() {
+ public void run() {
+ int[] arr = new int[3];
+ assertIsManaged();
+ int res = $noinline$run1(arr);
+ assertIsManaged(); // Only single frame is deoptimized.
+ if (res != 79) {
+ System.out.println("Failure 1!");
+ System.exit(0);
+ }
+ }
+ });
+
+ // Single-frame deoptimization that covers a full fragment.
+ execute(new Runnable() {
+ public void run() {
+ try {
+ int[] arr = new int[3];
+ assertIsManaged();
+ // Use reflection to call $noinline$run2 so that it does
+ // full-fragment deoptimization since that is an upcall.
+ Class<?> cls = Class.forName("Main");
+ Method method = cls.getDeclaredMethod("$noinline$run2", int[].class);
+ double res = (double)method.invoke(Main.class, arr);
+ assertIsManaged(); // Only single frame is deoptimized.
+ if (res != 79.3d) {
+ System.out.println("Failure 2!");
+ System.exit(0);
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ });
+
+ // Full-fragment deoptimization.
+ execute(new Runnable() {
+ public void run() {
+ assertIsManaged();
+ float res = $noinline$run3B();
+ assertIsInterpreted(); // Every deoptimizeable method is deoptimized.
+ if (res != 0.034f) {
+ System.out.println("Failure 3!");
+ System.exit(0);
+ }
+ }
+ });
+
+ undeoptimizeAll(); // Make compiled code usable again.
+
+ // Partial-fragment deoptimization.
+ execute(new Runnable() {
+ public void run() {
+ try {
+ assertIsManaged();
+ map.put(new DummyObject(10), Long.valueOf(100));
+ assertIsInterpreted(); // Every deoptimizeable method is deoptimized.
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ });
+
+ undeoptimizeAll(); // Make compiled code usable again.
+
+ if (!DummyObject.sHashCodeInvoked) {
+ System.out.println("hashCode() method not invoked!");
+ }
+ if (map.get(new DummyObject(10)) != 100) {
+ System.out.println("Wrong hashmap value!");
+ }
+ System.out.println("Finishing");
+ }
+
+ public static int $noinline$run1(int[] arr) {
+ assertIsManaged();
+ // Prevent inlining.
+ if (sFlag) {
+ throw new Error();
+ }
+ boolean caught = false;
+ // BCE will use deoptimization for the code below.
+ try {
+ arr[0] = 1;
+ arr[1] = 1;
+ arr[2] = 1;
+ // This causes AIOOBE and triggers deoptimization from compiled code.
+ arr[3] = 1;
+ } catch (ArrayIndexOutOfBoundsException e) {
+ assertIsInterpreted(); // Single-frame deoptimization triggered.
+ caught = true;
+ }
+ if (!caught) {
+ System.out.println("Expected exception");
+ }
+ assertIsInterpreted();
+ return 79;
+ }
+
+ public static double $noinline$run2(int[] arr) {
+ assertIsManaged();
+ // Prevent inlining.
+ if (sFlag) {
+ throw new Error();
+ }
+ boolean caught = false;
+ // BCE will use deoptimization for the code below.
+ try {
+ arr[0] = 1;
+ arr[1] = 1;
+ arr[2] = 1;
+ // This causes AIOOBE and triggers deoptimization from compiled code.
+ arr[3] = 1;
+ } catch (ArrayIndexOutOfBoundsException e) {
+ assertIsInterpreted(); // Single-frame deoptimization triggered.
+ caught = true;
+ }
+ if (!caught) {
+ System.out.println("Expected exception");
+ }
+ assertIsInterpreted();
+ return 79.3d;
+ }
+
+ public static float $noinline$run3A() {
+ assertIsManaged();
+ // Prevent inlining.
+ if (sFlag) {
+ throw new Error();
+ }
+ // Deoptimize callers.
+ deoptimizeAll();
+ assertIsInterpreted();
+ assertCallerIsInterpreted(); // $noinline$run3B is deoptimizeable.
+ return 0.034f;
+ }
+
+ public static float $noinline$run3B() {
+ assertIsManaged();
+ // Prevent inlining.
+ if (sFlag) {
+ throw new Error();
+ }
+ float res = $noinline$run3A();
+ assertIsInterpreted();
+ return res;
+ }
+}
asserts_enabled = false;
}
-
-// public static native boolean isInterpreted();
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpreted(JNIEnv* env, jclass) {
+static jboolean IsInterpreted(JNIEnv* env, jclass, size_t level) {
ScopedObjectAccess soa(env);
- NthCallerVisitor caller(soa.Self(), 1, false);
+ NthCallerVisitor caller(soa.Self(), level, false);
caller.WalkStack();
CHECK(caller.caller != nullptr);
return caller.GetCurrentShadowFrame() != nullptr ? JNI_TRUE : JNI_FALSE;
}
+// public static native boolean isInterpreted();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpreted(JNIEnv* env, jclass klass) {
+ return IsInterpreted(env, klass, 1);
+}
+
// public static native void assertIsInterpreted();
extern "C" JNIEXPORT void JNICALL Java_Main_assertIsInterpreted(JNIEnv* env, jclass klass) {
}
}
-
-// public static native boolean isManaged();
-
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isManaged(JNIEnv* env, jclass cls) {
+static jboolean IsManaged(JNIEnv* env, jclass cls, size_t level) {
ScopedObjectAccess soa(env);
mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
return JNI_FALSE;
}
- NthCallerVisitor caller(soa.Self(), 1, false);
+ NthCallerVisitor caller(soa.Self(), level, false);
caller.WalkStack();
CHECK(caller.caller != nullptr);
return caller.GetCurrentShadowFrame() != nullptr ? JNI_FALSE : JNI_TRUE;
}
+// public static native boolean isManaged();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isManaged(JNIEnv* env, jclass cls) {
+ return IsManaged(env, cls, 1);
+}
+
// public static native void assertIsManaged();
extern "C" JNIEXPORT void JNICALL Java_Main_assertIsManaged(JNIEnv* env, jclass cls) {
}
}
+// public static native boolean isCallerInterpreted();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isCallerInterpreted(JNIEnv* env, jclass klass) {
+ return IsInterpreted(env, klass, 2);
+}
+
+// public static native void assertCallerIsInterpreted();
+
+extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsInterpreted(JNIEnv* env, jclass klass) {
+ if (asserts_enabled) {
+ CHECK(Java_Main_isCallerInterpreted(env, klass));
+ }
+}
+
+// public static native boolean isCallerManaged();
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isCallerManaged(JNIEnv* env, jclass cls) {
+ return IsManaged(env, cls, 2);
+}
+
+// public static native void assertCallerIsManaged();
+
+extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsManaged(JNIEnv* env, jclass cls) {
+ if (asserts_enabled) {
+ CHECK(Java_Main_isCallerManaged(env, cls));
+ }
+}
+
} // namespace art