author:    2021-12-01 17:09:08 +0000
committer: 2022-01-15 22:04:52 +0000
commit:    fa40e6e318b21d4a1885a6ffea6efc3c0b5cc1cd
tree:      ea74f6947c5c6caaaf7a6fb3ece3369aec3a62c3 /runtime/mirror/dex_cache-inl.h
parent:    c5f41cd1a39472dbe2480c09a1ce20aeced2a9f6
Add thread-shared interpreter cache
The thread-local interpreter cache handles around 75% of method/field
lookups from the interpreter.
Add thread-shared interpreter cache which can be much bigger (since
we pay the memory cost just once rather than per thread). This increases
the cache hit rate to 90%.
This effectively halves the amount of time we spend in
'NterpGetMethod' (including DexCache lookups), which is the single
most expensive method during startup.
Furthermore, it also reduces the amount of time we spend resolving
methods by around 25% since DexCache entries get evicted less often.
The shared cache increases memory use by 256 KB per process, so also
shrink the fixed-size DexCache fields array to balance it out.
Test: test.py --host
Change-Id: I3cd369613d47de117ab69d5bee00d4cf89b87913
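
The commit message above describes the thread-shared cache only at a high level. As a rough illustration of the idea (not ART's implementation: the class name, entry layout, table size, and hash below are all assumptions), such a cache can be a direct-mapped, lossy table of key/value entries that any thread may read or overwrite:

```cpp
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>

// Hypothetical sketch of a lossy, process-wide interpreter cache.
// Each slot is read and written as one atomic unit, so a racing writer
// can at worst cause a miss, never a mismatched key/value pair.
class SharedInterpreterCache {
 public:
  static constexpr size_t kSize = 16 * 1024;  // must be a power of two

  SharedInterpreterCache() {
    // Key 0 is reserved to mean "empty slot".
    for (auto& slot : entries_) {
      slot.store(Entry{0, 0}, std::memory_order_relaxed);
    }
  }

  // Returns true and fills *value on a hit for key (e.g. a dex PC).
  bool Get(uintptr_t key, uintptr_t* value) const {
    Entry e = entries_[IndexOf(key)].load(std::memory_order_acquire);
    if (e.key != key) {
      return false;  // empty slot, or a collision evicted this entry
    }
    *value = e.value;
    return true;
  }

  // Publishes (key, value); collisions silently overwrite the slot.
  void Set(uintptr_t key, uintptr_t value) {
    entries_[IndexOf(key)].store(Entry{key, value}, std::memory_order_release);
  }

 private:
  struct Entry {
    uintptr_t key;
    uintptr_t value;
  };

  static size_t IndexOf(uintptr_t key) {
    return (key >> 2) & (kSize - 1);  // drop alignment bits, mask to table size
  }

  std::array<std::atomic<Entry>, kSize> entries_;
};
```

With this assumed geometry, a 64-bit process would spend 16384 slots x 16 bytes = 256 KB on the table, matching the per-process figure quoted above, though the real cache's size and hashing are not visible in this commit. Because the cache is only a hint, a miss simply falls back to the slow lookup path, which is what makes the racy overwrites safe.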
Diffstat (limited to 'runtime/mirror/dex_cache-inl.h')
-rw-r--r-- | runtime/mirror/dex_cache-inl.h | 26 |
1 file changed, 6 insertions, 20 deletions
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 31f2bd2d7b..8a1ed71197 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -357,32 +357,18 @@ inline void DexCache::SetResolvedMethod(uint32_t method_idx, ArtMethod* method)
 template <typename T>
 NativeDexCachePair<T> DexCache::GetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array,
                                               size_t idx) {
-  if (kRuntimePointerSize == PointerSize::k64) {
-    auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
-    ConversionPair64 value = AtomicLoadRelaxed16B(&array[idx]);
-    return NativeDexCachePair<T>(reinterpret_cast64<T*>(value.first),
-                                 dchecked_integral_cast<size_t>(value.second));
-  } else {
-    auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
-    ConversionPair32 value = array[idx].load(std::memory_order_relaxed);
-    return NativeDexCachePair<T>(reinterpret_cast32<T*>(value.first), value.second);
-  }
+  auto* array = reinterpret_cast<std::atomic<AtomicPair<size_t>>*>(pair_array);
+  AtomicPair<size_t> value = AtomicPairLoadAcquire(&array[idx]);
+  return NativeDexCachePair<T>(reinterpret_cast<T*>(value.first), value.second);
 }
 
 template <typename T>
 void DexCache::SetNativePair(std::atomic<NativeDexCachePair<T>>* pair_array,
                              size_t idx,
                              NativeDexCachePair<T> pair) {
-  if (kRuntimePointerSize == PointerSize::k64) {
-    auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
-    ConversionPair64 v(reinterpret_cast64<uint64_t>(pair.object), pair.index);
-    AtomicStoreRelease16B(&array[idx], v);
-  } else {
-    auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
-    ConversionPair32 v(reinterpret_cast32<uint32_t>(pair.object),
-                       dchecked_integral_cast<uint32_t>(pair.index));
-    array[idx].store(v, std::memory_order_release);
-  }
+  auto* array = reinterpret_cast<std::atomic<AtomicPair<size_t>>*>(pair_array);
+  AtomicPair<size_t> v(reinterpret_cast<size_t>(pair.object), pair.index);
+  AtomicPairStoreRelease(&array[idx], v);
 }
 
 template <typename T,
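
The replacement code above relies on an AtomicPair<size_t> plus AtomicPairLoadAcquire/AtomicPairStoreRelease helpers defined elsewhere in the tree. Below is a minimal sketch of what such helpers can look like; it is an assumption for illustration, not ART's definition. The removed 64-bit path used a dedicated 16-byte primitive (AtomicLoadRelaxed16B), whereas plain std::atomic may fall back to a lock for a two-word struct.

```cpp
#include <atomic>
#include <cstddef>

// Hypothetical stand-in for the AtomicPair<IntType> used above: two integers
// that are always loaded and stored together. A real runtime would pick a
// lock-free 8- or 16-byte primitive per platform instead of relying on
// std::atomic for a two-word struct.
template <typename IntType>
struct AtomicPair {
  AtomicPair() = default;
  AtomicPair(IntType f, IntType s) : first(f), second(s) {}

  IntType first{};
  IntType second{};
};

// Acquire load: both halves are read as one unit, ordered before any later
// accesses that depend on the loaded pointer.
template <typename IntType>
AtomicPair<IntType> AtomicPairLoadAcquire(std::atomic<AtomicPair<IntType>>* target) {
  return target->load(std::memory_order_acquire);
}

// Release store: both halves are published as one unit, after any earlier
// writes to the data the stored pointer refers to.
template <typename IntType>
void AtomicPairStoreRelease(std::atomic<AtomicPair<IntType>>* target,
                            AtomicPair<IntType> value) {
  target->store(value, std::memory_order_release);
}
```

Treating both halves as one atomic unit is what lets the old 32-/64-bit branches collapse into a single path; the release store paired with the acquire load additionally orders publication of the pair after earlier writes to the object it points at.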