Use ArtMethod* .bss entries for HInvokeStaticOrDirect.
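
PC-relative loads of the resolved method now use the new kMethodBssEntry
patch type instead of kDexCacheArray, and the ArtMethod* .bss slots are
described by a compressed MethodBssMapping built with the new
MethodBssMappingEncoder: each entry covers its method_index plus up to 16
lower method indexes flagged in a 16-bit index_mask, all mapping to
consecutive pointer-size .bss slots ending at bss_offset.

For illustration only, a decoder consistent with the encoder and with the
assertions in the new test could look as follows (method_bss_mapping.h is
not part of this change's excerpt, so the CoversIndex()/GetBssOffset()
bodies below are a reconstruction, not the actual implementation):

    #include <cstddef>
    #include <cstdint>

    struct ExampleMappingEntry {
      uint16_t method_index;  // Highest method index covered by this entry.
      uint16_t index_mask;    // Bit (16 - d) set => index (method_index - d) is covered.
      uint32_t bss_offset;    // .bss offset of the ArtMethod* slot for method_index.

      bool CoversIndex(uint32_t method_idx) const {
        uint32_t diff = method_index - method_idx;
        return diff == 0u || (diff <= 16u && ((index_mask >> (16u - diff)) & 1u) != 0u);
      }

      // Covered indexes occupy consecutive `entry_size` slots ending at bss_offset.
      uint32_t GetBssOffset(uint32_t method_idx, size_t entry_size) const {
        uint32_t diff = method_index - method_idx;
        if (diff == 0u) {
          return bss_offset;
        }
        // __builtin_popcount stands in for ART's POPCOUNT (base/bit_utils.h).
        uint32_t slots_after =
            static_cast<uint32_t>(__builtin_popcount(index_mask >> (16u - diff)));
        return bss_offset - slots_after * static_cast<uint32_t>(entry_size);
      }
    };

With the entry built by the new encoder test, {17, 0x0011, 2 * pointer size},
method indexes 1, 5 and 17 decode to .bss offsets 0, pointer size and
2 * pointer size, respectively.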

Test: m test-art-host-gtest
Test: testrunner.py --host
Test: testrunner.py --target
Test: Nexus 6P boots.
Test: Build aosp_mips64-userdebug.
Bug: 30627598
Change-Id: I0e54fdd2e91e983d475b7a04d40815ba89ae3d4f
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 117684a..bc21607 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -59,11 +59,11 @@
     case LinkerPatch::Type::kBakerReadBarrierBranch:
       return false;
     case LinkerPatch::Type::kMethodRelative:
+    case LinkerPatch::Type::kMethodBssEntry:
     case LinkerPatch::Type::kTypeRelative:
     case LinkerPatch::Type::kTypeBssEntry:
     case LinkerPatch::Type::kStringRelative:
     case LinkerPatch::Type::kStringBssEntry:
-    case LinkerPatch::Type::kDexCacheArray:
       return patch.LiteralOffset() == patch.PcInsnOffset();
   }
 }
@@ -251,20 +251,20 @@
       // ADD immediate, 64-bit with imm12 == 0 (unset).
       if (!kEmitCompilerReadBarrier) {
         DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
-               patch.GetType() == LinkerPatch::Type::kStringRelative ||
-               patch.GetType() == LinkerPatch::Type::kTypeRelative) << patch.GetType();
+               patch.GetType() == LinkerPatch::Type::kTypeRelative ||
+               patch.GetType() == LinkerPatch::Type::kStringRelative) << patch.GetType();
       } else {
         // With the read barrier (non-Baker) enabled, it could be kStringBssEntry or kTypeBssEntry.
         DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
-               patch.GetType() == LinkerPatch::Type::kStringRelative ||
                patch.GetType() == LinkerPatch::Type::kTypeRelative ||
-               patch.GetType() == LinkerPatch::Type::kStringBssEntry ||
-               patch.GetType() == LinkerPatch::Type::kTypeBssEntry) << patch.GetType();
+               patch.GetType() == LinkerPatch::Type::kStringRelative ||
+               patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
+               patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
       }
       shift = 0u;  // No shift for ADD.
     } else {
       // LDR/STR 32-bit or 64-bit with imm12 == 0 (unset).
-      DCHECK(patch.GetType() == LinkerPatch::Type::kDexCacheArray ||
+      DCHECK(patch.GetType() == LinkerPatch::Type::kMethodBssEntry ||
              patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
              patch.GetType() == LinkerPatch::Type::kStringBssEntry) << patch.GetType();
       DCHECK_EQ(insn & 0xbfbffc00, 0xb9000000) << std::hex << insn;
diff --git a/compiler/linker/method_bss_mapping_encoder.h b/compiler/linker/method_bss_mapping_encoder.h
new file mode 100644
index 0000000..b2922ec
--- /dev/null
+++ b/compiler/linker/method_bss_mapping_encoder.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
+#define ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
+
+#include "base/enums.h"
+#include "base/logging.h"
+#include "dex_file.h"
+#include "method_bss_mapping.h"
+
+namespace art {
+namespace linker {
+
+// Helper class for encoding compressed MethodBssMapping.
+class MethodBssMappingEncoder {
+ public:
+  explicit MethodBssMappingEncoder(PointerSize pointer_size)
+      : pointer_size_(static_cast<size_t>(pointer_size)) {
+    entry_.method_index = DexFile::kDexNoIndex16;
+    entry_.index_mask = 0u;
+    entry_.bss_offset = static_cast<uint32_t>(-1);
+  }
+
+  // Try to merge the next method_index -> bss_offset mapping into the current entry.
+  // Return true on success, false on failure.
+  bool TryMerge(uint32_t method_index, uint32_t bss_offset) {
+    DCHECK_NE(method_index, entry_.method_index);
+    if (entry_.bss_offset + pointer_size_ != bss_offset) {
+      return false;
+    }
+    uint32_t diff = method_index - entry_.method_index;
+    if (diff > 16u) {
+      return false;
+    }
+    if ((entry_.index_mask & ~(static_cast<uint32_t>(-1) << diff)) != 0u) {
+      return false;
+    }
+    entry_.method_index = method_index;
+    // Insert the bit indicating the method index we've just overwritten
+    // and shift bits indicating method indexes before that.
+    entry_.index_mask = dchecked_integral_cast<uint16_t>(
+        (static_cast<uint32_t>(entry_.index_mask) | 0x10000u) >> diff);
+    entry_.bss_offset = bss_offset;
+    return true;
+  }
+
+  void Reset(uint32_t method_index, uint32_t bss_offset) {
+    entry_.method_index = method_index;
+    entry_.index_mask = 0u;
+    entry_.bss_offset = bss_offset;
+  }
+
+  MethodBssMappingEntry GetEntry() {
+    return entry_;
+  }
+
+ private:
+  size_t pointer_size_;
+  MethodBssMappingEntry entry_;
+};
+
+}  // namespace linker
+}  // namespace art
+
+#endif  // ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
diff --git a/compiler/linker/method_bss_mapping_encoder_test.cc b/compiler/linker/method_bss_mapping_encoder_test.cc
new file mode 100644
index 0000000..1240389
--- /dev/null
+++ b/compiler/linker/method_bss_mapping_encoder_test.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "method_bss_mapping_encoder.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+namespace linker {
+
+TEST(MethodBssMappingEncoder, TryMerge) {
+  for (PointerSize pointer_size : {PointerSize::k32, PointerSize::k64}) {
+    size_t raw_pointer_size = static_cast<size_t>(pointer_size);
+    MethodBssMappingEncoder encoder(pointer_size);
+    encoder.Reset(1u, 0u);
+    ASSERT_FALSE(encoder.TryMerge(5u, raw_pointer_size + 1));       // Wrong bss_offset difference.
+    ASSERT_FALSE(encoder.TryMerge(18u, raw_pointer_size));          // Method index out of range.
+    ASSERT_TRUE(encoder.TryMerge(5u, raw_pointer_size));
+    ASSERT_TRUE(encoder.GetEntry().CoversIndex(1u));
+    ASSERT_TRUE(encoder.GetEntry().CoversIndex(5u));
+    ASSERT_FALSE(encoder.GetEntry().CoversIndex(17u));
+    ASSERT_FALSE(encoder.TryMerge(17u, 2 * raw_pointer_size + 1));  // Wrong bss_offset difference.
+    ASSERT_FALSE(encoder.TryMerge(18u, 2 * raw_pointer_size));      // Method index out of range.
+    ASSERT_TRUE(encoder.TryMerge(17u, 2 * raw_pointer_size));
+    ASSERT_TRUE(encoder.GetEntry().CoversIndex(1u));
+    ASSERT_TRUE(encoder.GetEntry().CoversIndex(5u));
+    ASSERT_TRUE(encoder.GetEntry().CoversIndex(17u));
+    ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(1u, raw_pointer_size));
+    ASSERT_EQ(raw_pointer_size, encoder.GetEntry().GetBssOffset(5u, raw_pointer_size));
+    ASSERT_EQ(2 * raw_pointer_size, encoder.GetEntry().GetBssOffset(17u, raw_pointer_size));
+    ASSERT_EQ(0x0011u, encoder.GetEntry().index_mask);
+    ASSERT_FALSE(encoder.TryMerge(18u, 3 * raw_pointer_size));      // Method index out of range.
+  }
+}
+
+}  // namespace linker
+}  // namespace art
diff --git a/compiler/linker/mips/relative_patcher_mips.cc b/compiler/linker/mips/relative_patcher_mips.cc
index 8da530f..d99d237 100644
--- a/compiler/linker/mips/relative_patcher_mips.cc
+++ b/compiler/linker/mips/relative_patcher_mips.cc
@@ -50,7 +50,6 @@
   uint32_t anchor_literal_offset = patch.PcInsnOffset();
   uint32_t literal_offset = patch.LiteralOffset();
   uint32_t literal_low_offset;
-  bool dex_cache_array = (patch.GetType() == LinkerPatch::Type::kDexCacheArray);
 
   // Perform basic sanity checks and initialize `literal_low_offset` to point
   // to the instruction containing the 16 least significant bits of the
@@ -72,16 +71,8 @@
     DCHECK_GE(code->size(), 16u);
     DCHECK_LE(literal_offset, code->size() - 12u);
     DCHECK_GE(literal_offset, 4u);
-    // The NAL instruction may not precede immediately as the PC+0 value may
-    // come from HMipsComputeBaseMethodAddress.
-    if (dex_cache_array) {
-      DCHECK_EQ(literal_offset + 4u, anchor_literal_offset);
-      // NAL
-      DCHECK_EQ((*code)[literal_offset - 4], 0x00);
-      DCHECK_EQ((*code)[literal_offset - 3], 0x00);
-      DCHECK_EQ((*code)[literal_offset - 2], 0x10);
-      DCHECK_EQ((*code)[literal_offset - 1], 0x04);
-    }
+    // There is no NAL instruction preceding the LUI here; the anchor (PC+0)
+    // value comes from HMipsComputeBaseMethodAddress.
     // LUI reg, offset_high
     DCHECK_EQ((*code)[literal_offset + 0], 0x34);
     DCHECK_EQ((*code)[literal_offset + 1], 0x12);
@@ -90,10 +81,6 @@
     // ADDU reg, reg, reg2
     DCHECK_EQ((*code)[literal_offset + 4], 0x21);
     DCHECK_EQ(((*code)[literal_offset + 5] & 0x07), 0x00);
-    if (dex_cache_array) {
-      // reg2 is either RA or from HMipsComputeBaseMethodAddress.
-      DCHECK_EQ(((*code)[literal_offset + 6] & 0x1F), 0x1F);
-    }
     DCHECK_EQ(((*code)[literal_offset + 7] & 0xFC), 0x00);
     // instr reg(s), offset_low
     DCHECK_EQ((*code)[literal_offset + 8], 0x78);
@@ -104,9 +91,6 @@
   // Apply patch.
   uint32_t anchor_offset = patch_offset - literal_offset + anchor_literal_offset;
   uint32_t diff = target_offset - anchor_offset;
-  if (dex_cache_array && !is_r6) {
-    diff += kDexCacheArrayLwOffset;
-  }
   diff += (diff & 0x8000) << 1;  // Account for sign extension in "instr reg(s), offset_low".
 
   // LUI reg, offset_high / AUIPC reg, offset_high
diff --git a/compiler/linker/mips/relative_patcher_mips.h b/compiler/linker/mips/relative_patcher_mips.h
index 852a345..0b74bd3 100644
--- a/compiler/linker/mips/relative_patcher_mips.h
+++ b/compiler/linker/mips/relative_patcher_mips.h
@@ -46,9 +46,6 @@
                                    uint32_t patch_offset) OVERRIDE;
 
  private:
-  // We'll maximize the range of a single load instruction for dex cache array accesses
-  // by aligning offset -32768 with the offset of the first used element.
-  static constexpr uint32_t kDexCacheArrayLwOffset = 0x8000;
   bool is_r6;
 
   DISALLOW_COPY_AND_ASSIGN(MipsRelativePatcher);
diff --git a/compiler/linker/mips/relative_patcher_mips_test.cc b/compiler/linker/mips/relative_patcher_mips_test.cc
index 961b312..49af7c6 100644
--- a/compiler/linker/mips/relative_patcher_mips_test.cc
+++ b/compiler/linker/mips/relative_patcher_mips_test.cc
@@ -61,7 +61,6 @@
   ASSERT_TRUE(result.first);
 
   uint32_t diff = target_offset - (result.second + kAnchorOffset);
-  CHECK_NE(patches[0].GetType(), LinkerPatch::Type::kDexCacheArray);
   diff += (diff & 0x8000) << 1;  // Account for sign extension in addiu.
 
   const uint8_t expected_code[] = {