author David Srbecky <dsrbecky@google.com> 2022-02-20 16:10:11 +0000
committer David Srbecky <dsrbecky@google.com> 2022-02-21 10:29:12 +0000
commit 0992436853a4a31e4bf7f4eefdc5245ea1d7c73f (patch)
tree   fef7a06ecddc92e7ecbcf0828f96fac284a6438b
parent 17958de62ddb522b009e5968227c650a7af6f3cb (diff)
Move GetWeakRefAccessEnabled check from nterp into the cache.
Ensure that all callers do the check. The check only applies if we have
read barriers. Just bypass the cache if that is not the case.

Test: run_build_test_target.py -j80 art-gtest-ss-gc-tlab
Change-Id: I9b12c694845277bf3a07719d3fd419538d07ef2c
-rw-r--r-- runtime/interpreter/interpreter_cache-inl.h | 18
-rw-r--r-- runtime/interpreter/interpreter_cache.cc    |  4
-rw-r--r-- runtime/interpreter/interpreter_cache.h     |  6
-rw-r--r-- runtime/interpreter/interpreter_common.h    |  4
-rw-r--r-- runtime/interpreter/mterp/nterp.cc          |  7
5 files changed, 18 insertions, 21 deletions
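
The caller-facing shape of the change, as a before/after sketch (the identifiers come from the hunks below; the framing is illustrative, not a verbatim excerpt):

// Before: every call site had to remember the weak-ref gate itself.
if (self->GetWeakRefAccessEnabled()) {
  self->GetInterpreterCache()->Set(dex_pc_ptr, value);
}

// After: the gate lives inside the cache, so no caller can forget it.
// Passing `self` explicitly also lets the owning-thread DCHECK avoid
// a Thread::Current() lookup.
self->GetInterpreterCache()->Set(self, dex_pc_ptr, value);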
diff --git a/runtime/interpreter/interpreter_cache-inl.h b/runtime/interpreter/interpreter_cache-inl.h
index 3a179483f1..cea8157d26 100644
--- a/runtime/interpreter/interpreter_cache-inl.h
+++ b/runtime/interpreter/interpreter_cache-inl.h
@@ -19,10 +19,12 @@
#include "interpreter_cache.h"
+#include "thread.h"
+
namespace art {
-inline bool InterpreterCache::Get(const void* key, /* out */ size_t* value) {
- DCHECK(IsCalledFromOwningThread());
+inline bool InterpreterCache::Get(Thread* self, const void* key, /* out */ size_t* value) {
+ DCHECK(self->GetInterpreterCache() == this) << "Must be called from owning thread";
Entry& entry = data_[IndexOf(key)];
if (LIKELY(entry.first == key)) {
*value = entry.second;
@@ -31,9 +33,15 @@ inline bool InterpreterCache::Get(const void* key, /* out */ size_t* value) {
return false;
}
-inline void InterpreterCache::Set(const void* key, size_t value) {
- DCHECK(IsCalledFromOwningThread());
- data_[IndexOf(key)] = Entry{key, value};
+inline void InterpreterCache::Set(Thread* self, const void* key, size_t value) {
+ DCHECK(self->GetInterpreterCache() == this) << "Must be called from owning thread";
+
+ // For simplicity, only update the cache if weak ref accesses are enabled. If
+ // they are disabled, this means the GC is processing the cache, and is
+ // reading it concurrently.
+ if (kUseReadBarrier && self->GetWeakRefAccessEnabled()) {
+ data_[IndexOf(key)] = Entry{key, value};
+ }
}
} // namespace art
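
The hunk above encodes a simple concurrency contract: while weak-ref access is disabled, the GC may be scanning the cache, so mutator stores are dropped rather than synchronized, and a dropped store only costs a later miss. Below is a minimal standalone sketch of that idea; GatedCache, writes_enabled_, and SetWritesEnabled are invented names for this sketch (in ART the gate is the thread-local state behind Thread::GetWeakRefAccessEnabled(), not a flag inside the cache):

#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <utility>

class GatedCache {
  using Entry = std::pair<const void*, size_t>;
  static constexpr size_t kSize = 256;  // must stay a power of two

 public:
  bool Get(const void* key, /* out */ size_t* value) const {
    const Entry& entry = data_[IndexOf(key)];
    if (entry.first == key) {
      *value = entry.second;
      return true;
    }
    return false;  // miss; the caller falls back to slow-path resolution
  }

  void Set(const void* key, size_t value) {
    // Mirrors InterpreterCache::Set: skip the store while the concurrent
    // reader is active instead of locking against it.
    if (writes_enabled_.load(std::memory_order_acquire)) {
      data_[IndexOf(key)] = Entry{key, value};
    }
  }

  // The scanner flips this off before reading and back on afterwards.
  void SetWritesEnabled(bool enabled) {
    writes_enabled_.store(enabled, std::memory_order_release);
  }

 private:
  static size_t IndexOf(const void* key) {
    return (reinterpret_cast<uintptr_t>(key) >> 2) & (kSize - 1);
  }

  std::array<Entry, kSize> data_{};
  std::atomic<bool> writes_enabled_{true};
};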
diff --git a/runtime/interpreter/interpreter_cache.cc b/runtime/interpreter/interpreter_cache.cc
index e43fe318cc..450edbaa4e 100644
--- a/runtime/interpreter/interpreter_cache.cc
+++ b/runtime/interpreter/interpreter_cache.cc
@@ -25,8 +25,4 @@ void InterpreterCache::Clear(Thread* owning_thread) {
data_.fill(Entry{});
}
-bool InterpreterCache::IsCalledFromOwningThread() {
- return Thread::Current()->GetInterpreterCache() == this;
-}
-
} // namespace art
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
index 99c7afe1e5..c57d0233a6 100644
--- a/runtime/interpreter/interpreter_cache.h
+++ b/runtime/interpreter/interpreter_cache.h
@@ -62,17 +62,15 @@ class ALIGNED(16) InterpreterCache {
// Clear the whole cache. It requires the owning thread for DCHECKs.
void Clear(Thread* owning_thread);
- ALWAYS_INLINE bool Get(const void* key, /* out */ size_t* value);
+ ALWAYS_INLINE bool Get(Thread* self, const void* key, /* out */ size_t* value);
- ALWAYS_INLINE void Set(const void* key, size_t value);
+ ALWAYS_INLINE void Set(Thread* self, const void* key, size_t value);
std::array<Entry, kSize>& GetArray() {
return data_;
}
private:
- bool IsCalledFromOwningThread();
-
static ALWAYS_INLINE size_t IndexOf(const void* key) {
static_assert(IsPowerOfTwo(kSize), "Size must be power of two");
size_t index = (reinterpret_cast<uintptr_t>(key) >> 2) & (kSize - 1);
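
The masking here is why the static_assert above insists on a power-of-two size: for such sizes `& (kSize - 1)` equals `% kSize` but compiles to a single AND. The `>> 2` first shifts out the low key bits, which carry little entropy for pointers into the dex instruction stream. A worked example, assuming kSize is 256 (its actual value is not shown in this hunk):

// key              = 0x7f3a12345678   (an example key pointer)
// key >> 2         = 0x1fce848d159e   (drop the low-entropy bits)
// ... & (256 - 1)  = 0x9e             (keep the low 8 bits: index 158)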
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index e3c13a20d7..0b91120c58 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -240,7 +240,7 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
InterpreterCache* tls_cache = self->GetInterpreterCache();
size_t tls_value;
ArtMethod* resolved_method;
- if (!IsNterpSupported() && LIKELY(tls_cache->Get(inst, &tls_value))) {
+ if (!IsNterpSupported() && LIKELY(tls_cache->Get(self, inst, &tls_value))) {
resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
} else {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
@@ -254,7 +254,7 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
return false;
}
if (!IsNterpSupported()) {
- tls_cache->Set(inst, reinterpret_cast<size_t>(resolved_method));
+ tls_cache->Set(self, inst, reinterpret_cast<size_t>(resolved_method));
}
}
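
Both call sites above just thread `self` through; the surrounding logic is an ordinary lookup-then-fill fast path. A condensed sketch of the flow, with the IsNterpSupported() guards omitted and ResolveMethodSlowPath standing in as a made-up name for the ClassLinker resolution in this hunk:

size_t tls_value;
ArtMethod* resolved_method;
if (LIKELY(tls_cache->Get(self, inst, &tls_value))) {
  // Fast path: this instruction was already resolved on this thread.
  resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
} else {
  // Slow path: resolve, then try to cache. Set() silently drops the
  // store while the GC is scanning the cache, so the next execution
  // would simply take this slow path again.
  resolved_method = ResolveMethodSlowPath(self, inst);
  tls_cache->Set(self, inst, reinterpret_cast<size_t>(resolved_method));
}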
diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc
index bd5636ca6a..c58c8a0e3b 100644
--- a/runtime/interpreter/mterp/nterp.cc
+++ b/runtime/interpreter/mterp/nterp.cc
@@ -91,12 +91,7 @@ inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock
template<typename T>
inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T value) {
DCHECK(kUseReadBarrier) << "Nterp only works with read barriers";
- // For simplicity, only update the cache if weak ref accesses are enabled. If
- // they are disabled, this means the GC is processing the cache, and is
- // reading it concurrently.
- if (self->GetWeakRefAccessEnabled()) {
- self->GetInterpreterCache()->Set(dex_pc_ptr, value);
- }
+ self->GetInterpreterCache()->Set(self, dex_pc_ptr, value);
}
template<typename T>