path: root/runtime/mirror/dex_cache.h
author Vladimir Marko <vmarko@google.com> 2017-03-14 14:18:46 +0000
committer Vladimir Marko <vmarko@google.com> 2017-03-14 19:03:20 +0000
commit f44d36c8423f81cbb5e9f55d8813e26ffa1a7f3b (patch)
tree 324b41485ce6c414c1a006c72cbcc5ed9f466138 /runtime/mirror/dex_cache.h
parent 8d6768d47b66a688d35399d524ad5a5450e9d9d4 (diff)
Revert^2 "Hash-based DexCache field array."
This reverts commit 6374c58f2ea403b3a05fb27376110fe4d0fc8e3f.

Test: testrunner.py --host --interpreter
Bug: 30627598
Change-Id: I275508e288a85d3aa08f7405a1a4f362af43b775
Diffstat (limited to 'runtime/mirror/dex_cache.h')
-rw-r--r--  runtime/mirror/dex_cache.h  121
1 file changed, 116 insertions(+), 5 deletions(-)
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 057919806f..35707ef4e9 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -91,12 +91,44 @@ template <typename T> struct PACKED(8) DexCachePair {
}
};
+template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
+ T* object;
+ size_t index;
+ // This is similar to DexCachePair except that we're storing a native pointer
+ // instead of a GC root. See DexCachePair for the details.
+ NativeDexCachePair(T* object, uint32_t index)
+ : object(object),
+ index(index) {}
+ NativeDexCachePair() : object(nullptr), index(0u) { }
+ NativeDexCachePair(const NativeDexCachePair<T>&) = default;
+ NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;
+
+ static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache, PointerSize pointer_size);
+
+ static uint32_t InvalidIndexForSlot(uint32_t slot) {
+ // Since the cache size is a power of two, 0 will always map to slot 0.
+ // Use 1 for slot 0 and 0 for all other slots.
+ return (slot == 0) ? 1u : 0u;
+ }
+
+ T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (idx != index) {
+ return nullptr;
+ }
+ DCHECK(object != nullptr);
+ return object;
+ }
+};
+
using TypeDexCachePair = DexCachePair<Class>;
using TypeDexCacheType = std::atomic<TypeDexCachePair>;
using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
+using FieldDexCachePair = NativeDexCachePair<ArtField>;
+using FieldDexCacheType = std::atomic<FieldDexCachePair>;
+
using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
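For illustration, here is a minimal standalone sketch (not part of this commit; FakeField, Pair, SlotIndex and Lookup are made-up names) of the sentinel scheme used by NativeDexCachePair above: each slot caches one object together with its dex index, a read only hits when the stored index matches the requested one, and slot 0 is initialized with index 1, which can never map to slot 0, so an empty slot reads as a miss.

// Standalone sketch, not ART code: illustrates InvalidIndexForSlot() and
// GetObjectForIndex() from the struct above.
#include <cstdint>
#include <cstdio>

struct FakeField { int id; };

struct Pair {
  FakeField* object;
  uint32_t index;

  FakeField* GetObjectForIndex(uint32_t idx) const {
    // Hit only if this slot currently caches exactly the requested index.
    return (idx == index) ? object : nullptr;
  }
};

constexpr uint32_t kCacheSize = 8u;  // small stand-in for the 1024-entry field cache
Pair cache[kCacheSize];

uint32_t SlotIndex(uint32_t idx) { return idx % kCacheSize; }

void ClearCache() {
  for (uint32_t slot = 0u; slot != kCacheSize; ++slot) {
    // Index 0 always maps to slot 0, so slot 0 uses 1 as its "empty" marker;
    // every other slot can safely use 0, which never maps to it.
    cache[slot] = Pair{nullptr, (slot == 0u) ? 1u : 0u};
  }
}

int main() {
  ClearCache();
  FakeField f{7};
  cache[SlotIndex(42u)] = Pair{&f, 42u};  // "resolve" dex index 42
  std::printf("%d\n", cache[SlotIndex(42u)].GetObjectForIndex(42u) != nullptr);  // 1: hit
  std::printf("%d\n", cache[SlotIndex(34u)].GetObjectForIndex(34u) != nullptr);  // 0: 34 collides with 42
  std::printf("%d\n", cache[SlotIndex(0u)].GetObjectForIndex(0u) != nullptr);    // 0: sentinel in slot 0
  return 0;
}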
@@ -116,6 +148,11 @@ class MANAGED DexCache FINAL : public Object {
static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
"String dex cache size is not a power of 2.");
+ // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
+ static constexpr size_t kDexCacheFieldCacheSize = 1024;
+ static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
+ "Field dex cache size is not a power of 2.");
+
// Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
// to hold.
static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
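A brief note on the power-of-two requirement mentioned in the comments above: with such a size the slot for a dex index can be computed with a single AND, which is what the compiled entrypoints rely on. The sketch below is an assumption about what the FieldSlotIndex() helper declared further down plausibly computes, not the committed body.

// Sketch, not ART code: why the cache size must be a power of two.
#include <cstdint>

constexpr uint32_t kFieldCacheSize = 1024u;
static_assert((kFieldCacheSize & (kFieldCacheSize - 1u)) == 0u,
              "Field dex cache size is not a power of 2.");

inline uint32_t FieldSlotIndexSketch(uint32_t field_idx) {
  // Same result as field_idx % kFieldCacheSize, but a single AND instruction.
  return field_idx & (kFieldCacheSize - 1u);
}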
@@ -130,6 +167,10 @@ class MANAGED DexCache FINAL : public Object {
return kDexCacheStringCacheSize;
}
+ static constexpr size_t StaticArtFieldSize() {
+ return kDexCacheFieldCacheSize;
+ }
+
static constexpr size_t StaticMethodTypeSize() {
return kDexCacheMethodTypeCacheSize;
}
@@ -255,6 +296,8 @@ class MANAGED DexCache FINAL : public Object {
// Pointer sized variant, used for patching.
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE void ClearResolvedField(uint32_t idx, PointerSize ptr_size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
MethodType* GetResolvedMethodType(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -299,11 +342,11 @@ class MANAGED DexCache FINAL : public Object {
SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
}
- ArtField** GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr<ArtField**>(ResolvedFieldsOffset());
+ FieldDexCacheType* GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldPtr<FieldDexCacheType*>(ResolvedFieldsOffset());
}
- void SetResolvedFields(ArtField** resolved_fields)
+ void SetResolvedFields(FieldDexCacheType* resolved_fields)
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
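The accessors above only expose the raw FieldDexCacheType array; the actual lookup and store paths live outside this header (in dex_cache-inl.h). The following is a hedged sketch of how those paths plausibly compose the helpers declared here (FieldSlotIndex(), Get/SetNativePairPtrSize(), GetObjectForIndex()); the bodies are an illustration, not the committed code.

// Illustrative only: plausible shapes of the field lookup/store paths.
inline ArtField* DexCache::GetResolvedField(uint32_t field_idx, PointerSize ptr_size) {
  // Load the pair stored in the slot for this index and report a hit only if
  // that slot currently caches this exact index.
  FieldDexCachePair pair =
      GetNativePairPtrSize(GetResolvedFields(), FieldSlotIndex(field_idx), ptr_size);
  return pair.GetObjectForIndex(field_idx);
}

inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field, PointerSize ptr_size) {
  DCHECK(field != nullptr);
  // Overwrite whatever the slot held before; a collision simply evicts the old entry.
  FieldDexCachePair pair(field, field_idx);
  SetNativePairPtrSize(GetResolvedFields(), FieldSlotIndex(field_idx), pair, ptr_size);
}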
@@ -376,6 +419,17 @@ class MANAGED DexCache FINAL : public Object {
template <typename PtrType>
static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, PointerSize ptr_size);
+ template <typename T>
+ static NativeDexCachePair<T> GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
+ size_t idx,
+ PointerSize ptr_size);
+
+ template <typename T>
+ static void SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
+ size_t idx,
+ NativeDexCachePair<T> pair,
+ PointerSize ptr_size);
+
private:
void Init(const DexFile* dex_file,
ObjPtr<String> location,
@@ -385,7 +439,7 @@ class MANAGED DexCache FINAL : public Object {
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
- ArtField** resolved_fields,
+ FieldDexCacheType* resolved_fields,
uint32_t num_resolved_fields,
MethodTypeDexCacheType* resolved_method_types,
uint32_t num_resolved_method_types,
@@ -394,8 +448,22 @@ class MANAGED DexCache FINAL : public Object {
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations,
+ // so we use a custom pair class for loading and storing the NativeDexCachePair<>.
+ template <typename IntType>
+ struct PACKED(2 * sizeof(IntType)) ConversionPair {
+ ConversionPair(IntType f, IntType s) : first(f), second(s) { }
+ ConversionPair(const ConversionPair&) = default;
+ ConversionPair& operator=(const ConversionPair&) = default;
+ IntType first;
+ IntType second;
+ };
+ using ConversionPair32 = ConversionPair<uint32_t>;
+ using ConversionPair64 = ConversionPair<uint64_t>;
+
uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
// Visit instance fields of the dex cache as well as its associated arrays.
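How GetNativePairPtrSize()/SetNativePairPtrSize() use ConversionPair is not visible in this header. A plausible sketch of the load path, based only on the declarations above (the committed body may differ): the atomic access is performed on a plain integer pair of the right width and the result is converted back into a NativeDexCachePair<T>.

// Illustrative only: a plausible body for GetNativePairPtrSize(), showing why
// ConversionPair32/ConversionPair64 exist.
template <typename T>
NativeDexCachePair<T> DexCache::GetNativePairPtrSize(
    std::atomic<NativeDexCachePair<T>>* pair_array, size_t idx, PointerSize ptr_size) {
  if (ptr_size == PointerSize::k64) {
    // 64-bit pointers: the pair is 16 bytes, so go through the hand-crafted routine.
    auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
    ConversionPair64 value = AtomicLoadRelaxed16B(&array[idx]);
    return NativeDexCachePair<T>(reinterpret_cast<T*>(static_cast<uintptr_t>(value.first)),
                                 static_cast<uint32_t>(value.second));
  } else {
    // 32-bit pointers: the pair is 8 bytes and a plain atomic load suffices.
    auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
    ConversionPair32 value = array[idx].load(std::memory_order_relaxed);
    return NativeDexCachePair<T>(reinterpret_cast<T*>(static_cast<uintptr_t>(value.first)),
                                 value.second);
  }
}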
@@ -406,12 +474,55 @@ class MANAGED DexCache FINAL : public Object {
void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+ // Due to lack of 16-byte atomics support, we use hand-crafted routines.
+#if defined(__aarch64__)
+ // 16-byte atomics are supported on aarch64.
+ ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
+ std::atomic<ConversionPair64>* target) {
+ return target->load(std::memory_order_relaxed);
+ }
+
+ ALWAYS_INLINE static void AtomicStoreRelease16B(
+ std::atomic<ConversionPair64>* target, ConversionPair64 value) {
+ target->store(value, std::memory_order_release);
+ }
+#elif defined(__x86_64__)
+ ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
+ std::atomic<ConversionPair64>* target) {
+ uint64_t first, second;
+ __asm__ __volatile__(
+ "lock cmpxchg16b (%2)"
+ : "=&a"(first), "=&d"(second)
+ : "r"(target), "a"(0), "d"(0), "b"(0), "c"(0)
+ : "cc");
+ return ConversionPair64(first, second);
+ }
+
+ ALWAYS_INLINE static void AtomicStoreRelease16B(
+ std::atomic<ConversionPair64>* target, ConversionPair64 value) {
+ uint64_t first, second;
+ __asm__ __volatile__ (
+ "movq (%2), %%rax\n\t"
+ "movq 8(%2), %%rdx\n\t"
+ "1:\n\t"
+ "lock cmpxchg16b (%2)\n\t"
+ "jnz 1b"
+ : "=&a"(first), "=&d"(second)
+ : "r"(target), "b"(value.first), "c"(value.second)
+ : "cc");
+ }
+#else
+ static ConversionPair64 AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target);
+ static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value);
+#endif
+
HeapReference<Object> dex_;
HeapReference<String> location_;
uint64_t dex_file_; // const DexFile*
uint64_t resolved_call_sites_; // GcRoot<CallSite>* array with num_resolved_call_sites_
// elements.
- uint64_t resolved_fields_; // ArtField*, array with num_resolved_fields_ elements.
+ uint64_t resolved_fields_; // std::atomic<FieldDexCachePair>*, array with
+ // num_resolved_fields_ elements.
uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with
// num_resolved_method_types_ elements.
uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
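For architectures other than aarch64 and x86-64, the two out-of-line declarations at the end of the #if block above have no inline body in this header; the fallback is implemented in dex_cache.cc, which is not part of this diff. Purely for illustration, one straightforward fallback is a mutex-guarded non-atomic copy, sketched below with a hypothetical lock name; the committed implementation may differ.

// Illustrative fallback only (the real out-of-line bodies live in dex_cache.cc):
// without native 16-byte atomics, a single lock can serialize all 16-byte
// accesses so that readers never observe a torn pair.
#include <mutex>

static std::mutex g_dex_cache_16b_mutex;  // hypothetical name

DexCache::ConversionPair64 DexCache::AtomicLoadRelaxed16B(
    std::atomic<ConversionPair64>* target) {
  std::lock_guard<std::mutex> lock(g_dex_cache_16b_mutex);
  // Safe only because every writer takes the same lock.
  return *reinterpret_cast<ConversionPair64*>(target);
}

void DexCache::AtomicStoreRelease16B(std::atomic<ConversionPair64>* target,
                                     ConversionPair64 value) {
  std::lock_guard<std::mutex> lock(g_dex_cache_16b_mutex);
  *reinterpret_cast<ConversionPair64*>(target) = value;
}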