diff options
| author | 2016-11-29 21:32:11 +0000 | |
|---|---|---|
| committer | 2016-11-29 21:32:12 +0000 | |
| commit | ab191538a1d9eee6ec96bc3fa86dde36a007a6f5 (patch) | |
| tree | 64c4ffaf96a8cf0aeb27bf8cbbbd5cfa42d1ff40 /runtime/thread.cc | |
| parent | 8fb28dcf0e83f7153e76e176671cd4ad1f20205b (diff) | |
| parent | f5de23265360e15fcfceb7d07bdadca0e5bb5f0a (diff) | |
Merge "X86_64: Add allocation entrypoint switching for CC is_marking"
Diffstat (limited to 'runtime/thread.cc')
| -rw-r--r-- | runtime/thread.cc | 14 |
1 file changed, 10 insertions, 4 deletions
diff --git a/runtime/thread.cc b/runtime/thread.cc index 65c86815b5..c92e38b6e8 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -122,21 +122,27 @@ void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) { CHECK(kUseReadBarrier); tls32_.is_gc_marking = is_marking; UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, is_marking); + if (kRuntimeISA == kX86_64) { + // Entrypoint switching is only implemented for X86_64. + ResetQuickAllocEntryPointsForThread(is_marking); + } } void Thread::InitTlsEntryPoints() { // Insert a placeholder so we can easily tell if we call an unimplemented entry point. uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints); - uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + - sizeof(tlsPtr_.quick_entrypoints)); + uintptr_t* end = reinterpret_cast<uintptr_t*>( + reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints)); for (uintptr_t* it = begin; it != end; ++it) { *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint); } InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints); } -void Thread::ResetQuickAllocEntryPointsForThread() { - ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints); +void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) { + // Entrypoint switching is currently only faster for X86_64 since other archs don't have TLAB + // fast path for non region space entrypoints. + ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints, is_marking); } class DeoptimizationContextRecord { |