Address checker tests around inline cache and the baseline compiler.
I failed to reproduce b/147094528 locally, but there are two known
sources of inline cache noise introduced by the baseline compiler:
- ensureJitCompiled doesn't ensure the method is compiled 'optimized',
  which is what the tests expect.
- when the GC is marking, we can potentially create duplicate entries for
  the same class in the inline cache (see the sketch below).
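
The duplicate-entry case is easiest to see in C++ form. A minimal sketch,
assuming a concurrent copying collector where a class can be observed
through both its from-space and to-space address while marking is in
progress (UpdateCache, kInlineCacheSize and is_gc_marking are illustrative
names for this sketch only; the real update lives in the
art_quick_update_inline_cache assembly stubs changed below):

    #include <atomic>
    #include <cstddef>

    static constexpr size_t kInlineCacheSize = 5;  // Mirrors INLINE_CACHE_SIZE.

    struct InlineCache {
      std::atomic<void*> classes[kInlineCacheSize];  // nullptr == empty slot.
    };

    void UpdateCache(InlineCache* cache, void* klass, bool is_gc_marking) {
      // The fix: while marking, 'klass' may be a to-space pointer while an
      // existing entry still holds the from-space pointer of the same class.
      // The equality scan below would then miss the entry and record a
      // duplicate, so skip the update entirely.
      if (is_gc_marking) {
        return;
      }
      for (size_t i = 0; i < kInlineCacheSize; ++i) {
        void* entry = cache->classes[i].load(std::memory_order_relaxed);
        if (entry == klass) {
          return;  // Already cached.
        }
        if (entry == nullptr) {
          void* expected = nullptr;
          if (cache->classes[i].compare_exchange_strong(expected, klass) ||
              expected == klass) {
            return;  // We claimed the slot, or another thread cached klass.
          }
        }
      }
      // All entries taken by other classes: leave the cache as is.
    }

The assembly hunks below implement the same early-out by testing the
thread's 'is GC marking' state: the marking register on ARM/ARM64, a
thread-local field on x86/x86-64.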
Bug: 147094528
Test: test.py
Change-Id: I5cd6e5874b2c7b3273a9f4c8cb0fca59263da034
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 74c1fe7..0609500 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -2735,6 +2735,9 @@
#if (INLINE_CACHE_SIZE != 5)
#error "INLINE_CACHE_SIZE not as expected."
#endif
+ // Don't update the cache if we are marking.
+ cmp rMR, #0
+ bne .Ldone
.Lentry1:
ldr ip, [r4, #INLINE_CACHE_CLASSES_OFFSET]
cmp ip, r0
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index fd1a44e..f44f3e0 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -2856,6 +2856,8 @@
#if (INLINE_CACHE_SIZE != 5)
#error "INLINE_CACHE_SIZE not as expected."
#endif
+ // Don't update the cache if we are marking.
+ cbnz wMR, .Ldone
.Lentry1:
ldr w9, [x8, #INLINE_CACHE_CLASSES_OFFSET]
cmp w9, w0
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index f212f34..ea0ff6b 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -2509,6 +2509,9 @@
#if (INLINE_CACHE_SIZE != 5)
#error "INLINE_CACHE_SIZE not as expected."
#endif
+ // Don't update the cache if we are marking.
+ cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
+ jnz .Lret
PUSH ecx
movl %eax, %ecx // eax will be used for cmpxchg
.Lentry1:
@@ -2554,6 +2557,7 @@
// Restore registers
movl %ecx, %eax
POP ecx
+.Lret:
ret
END_FUNCTION art_quick_update_inline_cache
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index d6d68de..6e3f602 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -2311,6 +2311,9 @@
#if (INLINE_CACHE_SIZE != 5)
#error "INLINE_CACHE_SIZE not as expected."
#endif
+ // Don't update the cache if we are marking.
+ cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
+ jnz .Ldone
.Lentry1:
movl INLINE_CACHE_CLASSES_OFFSET(%r11), %eax
cmpl %edi, %eax
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 22dbcce..6c76288 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -272,8 +272,6 @@
while (true) {
if (native && code_cache->ContainsMethod(method)) {
break;
- } else if (code_cache->WillExecuteJitCode(method)) {
- break;
} else {
// Sleep to yield to the compiler thread.
usleep(1000);
@@ -282,8 +280,14 @@
// Make sure there is a profiling info, required by the compiler.
ProfilingInfo::Create(self, method, /* retry_allocation */ true);
}
- // Will either ensure it's compiled or do the compilation itself.
+ // Will either ensure it's compiled or do the compilation itself. We do
+ // this before checking if we will execute JIT code to make sure the
+ // method is compiled 'optimized' and not baseline (tests expect optimized
+ // compilation).
jit->CompileMethod(method, self, /*baseline=*/ false, /*osr=*/ false, /*prejit=*/ false);
+ if (code_cache->WillExecuteJitCode(method)) {
+ break;
+ }
}
}
}
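
The test-side half of the fix follows a simple pattern: explicitly request
the compilation tier the test needs, then poll for it, instead of accepting
whatever JIT code happens to be present (baseline code from regular JIT
activity would pass a bare WillExecuteJitCode check). A self-contained
sketch of that pattern; the Jit, CodeCache, and Method types below are
stand-ins for this sketch, not the real ART classes:

    #include <unistd.h>

    struct Method {};

    struct CodeCache {
      bool has_optimized = false;  // Stand-in state for the sketch.
      bool WillExecuteJitCode(Method*) { return has_optimized; }
    };

    struct Jit {
      CodeCache* cache;
      void CompileMethod(Method*, bool baseline) {
        // Pretend compilation: the real call may also fail or be queued,
        // which is why the caller keeps polling.
        if (!baseline) {
          cache->has_optimized = true;
        }
      }
    };

    void EnsureOptimizedCompiled(Jit* jit, CodeCache* cache, Method* m) {
      while (true) {
        // Request 'optimized' (baseline=false) before checking the cache,
        // matching the reordering in runtime_state.cc above.
        jit->CompileMethod(m, /*baseline=*/ false);
        if (cache->WillExecuteJitCode(m)) {
          return;
        }
        usleep(1000);  // Yield to the compiler thread and retry.
      }
    }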