Store ImtIndex in ArtMethod.

This avoids recalculating the IMT index and reduces pressure on the thread-local cache.

This halves the time spent hashing, from 2% to 1% of total runtime (measured running the maps app on device).

Test: ./art/test.py -b --host --64
Change-Id: I2407bd9c222de4ddc6eea938908a1ac6d7abc35b
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 1472490..b6adcf0 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2671,7 +2671,7 @@
 
   DCHECK(!interface_method->IsRuntimeMethod());
   // Look whether we have a match in the ImtConflictTable.
-  uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+  uint32_t imt_index = interface_method->GetImtIndex();
   ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
   if (LIKELY(conflict_method->IsRuntimeMethod())) {
     ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);