From 8ef84afbaeb433adaed98a11902502dd733e5222 Mon Sep 17 00:00:00 2001 From: Serguei Katkov Date: Thu, 3 Apr 2014 15:17:26 +0700 Subject: [PATCH] art_quick_lock_object uses registers incorrectly The x86 implementation of art_quick_lock_object uses registers incorrectly in the .Lalready_thin case: eax points to the object, yet it is treated as if it held the lock word. This patch swaps the roles of eax (object pointer) and ecx (lock word) in that path. Change-Id: Iacf1a40c6570fae78a5504ca3b2f1218631f7a8f Signed-off-by: Serguei Katkov --- runtime/arch/x86/quick_entrypoints_x86.S | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 4bde8b7a8..336a0ccf8 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -532,12 +532,12 @@ DEFINE_FUNCTION art_quick_lock_object movl %ecx, %eax // restore eax jmp .Lretry_lock .Lalready_thin: - cmpw %ax, %dx // do we hold the lock already? + cmpw %cx, %dx // do we hold the lock already? jne .Lslow_lock - addl LITERAL(65536), %eax // increment recursion count - test LITERAL(0xC0000000), %eax // overflowed if either of top two bits are set + addl LITERAL(65536), %ecx // increment recursion count + test LITERAL(0xC0000000), %ecx // overflowed if either of top two bits are set jne .Lslow_lock // count overflowed so go slow - movl %eax, LOCK_WORD_OFFSET(%ecx) // update lockword, cmpxchg not necessary as we hold lock + movl %ecx, LOCK_WORD_OFFSET(%eax) // update lockword, cmpxchg not necessary as we hold lock ret .Lslow_lock: SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC -- 2.11.0