Refactor GetIMTIndex
Move the IMT index computation into a single ArtMethod::GetImtIndex()
helper so we can more easily maintain and experiment with interface
method table indexing and hashing.
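
A minimal standalone sketch of the pattern (simplified C++, not the
actual ART classes; the table size of 64 below is a placeholder): the
modulo hash that call sites previously computed as
GetDexMethodIndex() % ImTable::kSize now lives behind one
GetImtIndex() accessor, so the indexing scheme can change in one place.

    #include <cstdint>
    #include <cstdio>

    // Placeholder stand-in for ImTable; only the table size is needed here.
    struct ImTable {
      static constexpr uint32_t kSize = 64;
    };

    // Simplified stand-in for ArtMethod.
    class ArtMethod {
     public:
      explicit ArtMethod(uint32_t dex_method_index)
          : dex_method_index_(dex_method_index) {}

      uint32_t GetDexMethodIndex() const { return dex_method_index_; }

      // Single home for the IMT hashing scheme; call sites index the IMT
      // with this instead of repeating the modulo themselves.
      uint32_t GetImtIndex() const {
        return GetDexMethodIndex() % ImTable::kSize;
      }

     private:
      uint32_t dex_method_index_;
    };

    int main() {
      ArtMethod method(/*dex_method_index=*/12345);
      // 12345 % 64 == 57
      std::printf("imt index = %u\n", method.GetImtIndex());
      return 0;
    }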
Change-Id: I719920fae7490dcedcda7c1c36db225c2b8b16df
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 4fc3b54..eca9e2c 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1901,7 +1901,7 @@
__ LoadFromOffset(kLoadWord, temp, temp,
mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex() % ImTable::kSize, kArmPointerSize));
+ invoke->GetImtIndex(), kArmPointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
uint32_t entry_point =
@@ -6783,7 +6783,7 @@
locations->InAt(0).AsRegister<Register>(),
mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex() % ImTable::kSize, kArmPointerSize));
+ instruction->GetIndex(), kArmPointerSize));
}
__ LoadFromOffset(kLoadWord,
locations->Out().AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index b63a3d4..5d3c8c5 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3522,7 +3522,7 @@
__ Ldr(temp,
MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex() % ImTable::kSize, kArm64PointerSize));
+ invoke->GetImtIndex(), kArm64PointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
@@ -5153,7 +5153,7 @@
__ Ldr(XRegisterFrom(locations->Out()), MemOperand(XRegisterFrom(locations->InAt(0)),
mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex() % ImTable::kSize, kArm64PointerSize));
+ instruction->GetIndex(), kArm64PointerSize));
}
__ Ldr(XRegisterFrom(locations->Out()),
MemOperand(XRegisterFrom(locations->InAt(0)), method_offset));
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c8e927d..d5bad28 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3720,7 +3720,7 @@
__ LoadFromOffset(kLoadWord, temp, temp,
mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex() % ImTable::kSize, kMipsPointerSize));
+ invoke->GetImtIndex(), kMipsPointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
@@ -5169,7 +5169,7 @@
locations->InAt(0).AsRegister<Register>(),
mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex() % ImTable::kSize, kMipsPointerSize));
+ instruction->GetIndex(), kMipsPointerSize));
}
__ LoadFromOffset(kLoadWord,
locations->Out().AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 8d5dc84..539abf1 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2954,7 +2954,7 @@
__ LoadFromOffset(kLoadDoubleword, temp, temp,
mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex() % ImTable::kSize, kMips64PointerSize));
+ invoke->GetImtIndex(), kMips64PointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 9d0092b..a21c295 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2043,7 +2043,7 @@
Address(temp, mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
// temp = temp->GetImtEntryAt(method_offset);
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex() % ImTable::kSize, kX86PointerSize));
+ invoke->GetImtIndex(), kX86PointerSize));
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(temp,
@@ -4068,7 +4068,7 @@
mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
// temp = temp->GetImtEntryAt(method_offset);
method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex() % ImTable::kSize, kX86PointerSize));
+ instruction->GetIndex(), kX86PointerSize));
}
__ movl(locations->Out().AsRegister<Register>(),
Address(locations->InAt(0).AsRegister<Register>(), method_offset));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a8da5f2..135f0c4 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2258,7 +2258,7 @@
Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
// temp = temp->GetImtEntryAt(method_offset);
uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- invoke->GetImtIndex() % ImTable::kSize, kX86_64PointerSize));
+ invoke->GetImtIndex(), kX86_64PointerSize));
// temp = temp->GetImtEntryAt(method_offset);
__ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
@@ -3986,7 +3986,7 @@
Address(locations->InAt(0).AsRegister<CpuRegister>(),
mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
- instruction->GetIndex() % ImTable::kSize, kX86_64PointerSize));
+ instruction->GetIndex(), kX86_64PointerSize));
}
__ movq(locations->Out().AsRegister<CpuRegister>(),
Address(locations->InAt(0).AsRegister<CpuRegister>(), method_offset));
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 27b6896..d5e80b4 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -657,7 +657,7 @@
ArtMethod* new_method = nullptr;
if (invoke_instruction->IsInvokeInterface()) {
new_method = ic.GetTypeAt(i)->GetImt(pointer_size)->Get(
- method_index % ImTable::kSize, pointer_size);
+ method_index, pointer_size);
if (new_method->IsRuntimeMethod()) {
// Bail out as soon as we see a conflict trampoline in one of the target's
// interface table.
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index f2286e4..1c67bcc 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -16,6 +16,7 @@
#include "instruction_builder.h"
+#include "art_method-inl.h"
#include "bytecode_utils.h"
#include "class_linker.h"
#include "driver/compiler_options.h"
@@ -890,7 +891,7 @@
return_type,
dex_pc,
method_idx,
- resolved_method->GetDexMethodIndex());
+ resolved_method->GetImtIndex());
}
return HandleInvoke(invoke,
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 7647ad6..9f63bca 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -120,6 +120,10 @@
return dex_method_index_;
}
+inline uint32_t ArtMethod::GetImtIndex() {
+ return GetDexMethodIndex() % ImTable::kSize;
+}
+
inline ArtMethod** ArtMethod::GetDexCacheResolvedMethods(size_t pointer_size) {
return GetNativePointer<ArtMethod**>(DexCacheResolvedMethodsOffset(pointer_size),
pointer_size);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index b65cb23..2c40d6d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -418,6 +418,8 @@
ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetImtIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+
void SetDexMethodIndex(uint32_t new_idx) {
// Not called within a transaction.
dex_method_index_ = new_idx;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4406c0a..4259aea 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -6167,11 +6167,6 @@
}
}
-static inline uint32_t GetIMTIndex(ArtMethod* interface_method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- return interface_method->GetDexMethodIndex() % ImTable::kSize;
-}
-
ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count,
LinearAlloc* linear_alloc,
size_t image_pointer_size) {
@@ -6223,7 +6218,7 @@
// or interface methods in the IMT here they will not create extra conflicts since we compare
// names and signatures in SetIMTRef.
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = GetIMTIndex(interface_method);
+ const uint32_t imt_index = interface_method->GetImtIndex();
// There is only a conflict if the interface methods for an IMT slot don't all have
// the same implementation method; keep track of this to avoid creating a conflict table in
@@ -6277,7 +6272,7 @@
}
DCHECK(implementation_method != nullptr);
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = GetIMTIndex(interface_method);
+ const uint32_t imt_index = interface_method->GetImtIndex();
if (!imt[imt_index]->IsRuntimeMethod() ||
imt[imt_index] == unimplemented_method ||
imt[imt_index] == imt_conflict_method) {
@@ -6683,7 +6678,7 @@
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- uint32_t imt_index = GetIMTIndex(interface_method);
+ uint32_t imt_index = interface_method->GetImtIndex();
ArtMethod** imt_ptr = &out_imt[imt_index];
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 916ca29..db3f88f 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -19,7 +19,7 @@
#include "entrypoint_utils.h"
-#include "art_method.h"
+#include "art_method-inl.h"
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex_file.h"
@@ -559,7 +559,7 @@
}
}
case kInterface: {
- uint32_t imt_index = resolved_method->GetDexMethodIndex() % ImTable::kSize;
+ uint32_t imt_index = resolved_method->GetImtIndex();
size_t pointer_size = class_linker->GetImagePointerSize();
ArtMethod* imt_method = (*this_object)->GetClass()->GetImt(pointer_size)->
Get(imt_index, pointer_size);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7175d54..0a70be1 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2174,8 +2174,7 @@
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
// If the dex cache already resolved the interface method, look whether we have
// a match in the ImtConflictTable.
- uint32_t imt_index = interface_method->GetDexMethodIndex();
- ArtMethod* conflict_method = imt->Get(imt_index % ImTable::kSize, sizeof(void*));
+ ArtMethod* conflict_method = imt->Get(interface_method->GetImtIndex(), sizeof(void*));
if (LIKELY(conflict_method->IsRuntimeMethod())) {
ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*));
DCHECK(current_table != nullptr);
@@ -2226,8 +2225,8 @@
// We arrive here if we have found an implementation, and it is not in the ImtConflictTable.
// We create a new table with the new pair { interface_method, method }.
- uint32_t imt_index = interface_method->GetDexMethodIndex();
- ArtMethod* conflict_method = imt->Get(imt_index % ImTable::kSize, sizeof(void*));
+ uint32_t imt_index = interface_method->GetImtIndex();
+ ArtMethod* conflict_method = imt->Get(imt_index, sizeof(void*));
if (conflict_method->IsRuntimeMethod()) {
ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable(
cls.Get(),
@@ -2238,7 +2237,7 @@
if (new_conflict_method != conflict_method) {
// Update the IMT if we create a new conflict method. No fence needed here, as the
// data is consistent.
- imt->Set(imt_index % ImTable::kSize,
+ imt->Set(imt_index,
new_conflict_method,
sizeof(void*));
}