| author | 2014-04-03 15:17:26 +0700 |
|---|---|
| committer | 2014-04-04 09:14:18 +0700 |
| commit | 8ef84afbaeb433adaed98a11902502dd733e5222 (patch) |
| tree | 04400f9cac5cb04391f602594674a9651cd1fba6 |
| parent | e5893f8fb70c58fe9950c1fc8b1023e32ca34637 (diff) |
art_quick_lock_object uses registers incorrectly
The x86 implementation of art_quick_lock_object uses registers
incorrectly in the .Lalready_thin case: eax points to the object,
yet the code also uses it as if it held the lock word. This patch
fixes that by keeping the lock word in ecx and the object pointer
in eax.
Change-Id: Iacf1a40c6570fae78a5504ca3b2f1218631f7a8f
Signed-off-by: Serguei Katkov <serguei.i.katkov@intel.com>
| -rw-r--r-- | runtime/arch/x86/quick_entrypoints_x86.S | 8 |
|---|---|---|

1 file changed, 4 insertions(+), 4 deletions(-)
```diff
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 4bde8b7a8f..336a0ccf8c 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -532,12 +532,12 @@ DEFINE_FUNCTION art_quick_lock_object
     movl %ecx, %eax                    // restore eax
     jmp .Lretry_lock
 .Lalready_thin:
-    cmpw %ax, %dx                      // do we hold the lock already?
+    cmpw %cx, %dx                      // do we hold the lock already?
     jne .Lslow_lock
-    addl LITERAL(65536), %eax          // increment recursion count
-    test LITERAL(0xC0000000), %eax     // overflowed if either of top two bits are set
+    addl LITERAL(65536), %ecx          // increment recursion count
+    test LITERAL(0xC0000000), %ecx     // overflowed if either of top two bits are set
     jne .Lslow_lock                    // count overflowed so go slow
-    movl %eax, LOCK_WORD_OFFSET(%ecx)  // update lockword, cmpxchg not necessary as we hold lock
+    movl %ecx, LOCK_WORD_OFFSET(%eax)  // update lockword, cmpxchg not necessary as we hold lock
     ret
 .Lslow_lock:
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME   // save ref containing registers for GC
```
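For context, here is a minimal C sketch of the thin-lock re-entry fast path that this hunk implements, using the register roles from the fixed assembly (eax = object pointer, ecx = lock word, dx = current thread id). The lock-word layout and every name below are assumptions inferred from the constants in the diff, not ART source.

```c
#include <stdint.h>
#include <stdbool.h>

/*
 * Illustrative sketch (not ART source) of the .Lalready_thin fast path.
 * Assumed lock-word layout, inferred from the diff's constants:
 *   bits  0-15: owning thread id            (the cmpw %cx, %dx compare)
 *   bits 16-29: recursion count             (incremented by 1 << 16)
 *   bits 30-31: state bits; if the count carries into them, the word is
 *               no longer a valid thin lock, so fall back to the slow path
 */
#define RECURSION_INCREMENT (1u << 16)   /* addl LITERAL(65536) */
#define STATE_BITS_MASK     0xC0000000u  /* test LITERAL(0xC0000000) */

/* Returns true on fast-path success; false means take the slow path. */
static bool thin_lock_reenter(uint32_t *lock_word_addr,
                              uint16_t self_thread_id) {
    uint32_t lock_word = *lock_word_addr;       /* ecx in the fixed code */

    /* cmpw %cx, %dx: do we hold the lock already? */
    if ((uint16_t)lock_word != self_thread_id)
        return false;                           /* jne .Lslow_lock */

    lock_word += RECURSION_INCREMENT;           /* addl $65536, %ecx */
    if (lock_word & STATE_BITS_MASK)            /* test $0xC0000000, %ecx */
        return false;                           /* count overflowed: go slow */

    /* movl %ecx, LOCK_WORD_OFFSET(%eax): a plain store is enough because
       the current thread already owns the lock, so no cmpxchg is needed. */
    *lock_word_addr = lock_word;
    return true;
}
```

The sketch also shows why the pre-patch code was wrong: the lock word must be a separate value (ecx) from the object pointer (eax), since the final store writes the updated lock word *through* the object pointer. Using eax for both, as the old code did, compared and incremented the wrong value and wrote it to the wrong address.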