Optimizing/ARM: Implement kDexCachePcRelative dispatch.
Change-Id: I0fe2da50a30a3f62bec8ea01688dd1fec84b1831
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a98d9c6..76bf951 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -724,7 +724,9 @@
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
call_patches_(MethodReferenceComparator(),
graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ dex_cache_arrays_base_labels_(std::less<HArmDexCacheArraysBase*>(),
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
}
@@ -1922,10 +1924,18 @@
codegen_->GetAssembler(),
codegen_->GetInstructionSetFeatures());
if (intrinsic.TryDispatch(invoke)) {
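+ // For intrinsified invokes the ArtMethod* is needed only on the slow path, so the
+ // PC-relative base may live anywhere; GetInvokeStaticOrDirectExtraParameter() below
+ // retrieves it from the stack or a saved register when necessary.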
+ if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeDexCache()) {
+ invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
+ }
return;
}
HandleInvoke(invoke);
+
+ // For PC-relative dex cache the invoke has an extra input, the PC-relative address base.
+ if (invoke->HasPcRelativeDexCache()) {
+ invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
+ }
}
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM* codegen) {
@@ -5818,16 +5828,6 @@
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
MethodReference target_method) {
- if (desired_dispatch_info.method_load_kind ==
- HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- return HInvokeStaticOrDirect::DispatchInfo {
- HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- 0u,
- 0u
- };
- }
if (desired_dispatch_info.code_ptr_location ==
HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative) {
const DexFile& outer_dex_file = GetGraph()->GetDexFile();
@@ -5850,6 +5850,32 @@
return desired_dispatch_info;
}
+Register CodeGeneratorARM::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
+ Register temp) {
+ DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+ Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
+ if (!invoke->GetLocations()->Intrinsified()) {
+ return location.AsRegister<Register>();
+ }
+ // For intrinsics we allow any location, so it may be on the stack.
+ if (!location.IsRegister()) {
+ __ LoadFromOffset(kLoadWord, temp, SP, location.GetStackIndex());
+ return temp;
+ }
+ // For register locations, check if the register was saved. If so, get it from the stack.
+ // Note: There is a chance that the register was saved but not overwritten, so we could
+ // save one load. However, since this is just an intrinsic slow path we prefer this
+ // simpler and more robust approach rather than trying to determine if that's the case.
+ SlowPathCode* slow_path = GetCurrentSlowPath();
+ DCHECK(slow_path != nullptr); // For intrinsified invokes the call is emitted on the slow path.
+ if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
+ int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
+ __ LoadFromOffset(kLoadWord, temp, SP, stack_offset);
+ return temp;
+ }
+ return location.AsRegister<Register>();
+}
+
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// For better instruction scheduling we load the direct code pointer before the method pointer.
switch (invoke->GetCodePtrLocation()) {
@@ -5881,11 +5907,15 @@
__ LoadLiteral(temp.AsRegister<Register>(),
DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type.
- // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
- LOG(FATAL) << "Unsupported";
- UNREACHABLE();
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
+ HArmDexCacheArraysBase* base =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsArmDexCacheArraysBase();
+ Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
+ temp.AsRegister<Register>());
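+ // The base points at the lowest element offset requested for this dex file (see
+ // HArmDexCacheArraysBase::UpdateElementOffset), so the remaining offset is always
+ // non-negative, though it may still exceed the single-instruction load range.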
+ int32_t offset = invoke->GetDexCacheArrayOffset() - base->GetElementOffset();
+ __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), base_reg, offset);
+ break;
+ }
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
Register method_reg;
@@ -5970,7 +6000,11 @@
void CodeGeneratorARM::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
DCHECK(linker_patches->empty());
- size_t size = method_patches_.size() + call_patches_.size() + relative_call_patches_.size();
+ size_t size =
+ method_patches_.size() +
+ call_patches_.size() +
+ relative_call_patches_.size() +
+ /* MOVW+MOVT for each base */ 2u * dex_cache_arrays_base_labels_.size();
linker_patches->reserve(size);
for (const auto& entry : method_patches_) {
const MethodReference& target_method = entry.first;
@@ -5996,6 +6030,28 @@
info.target_method.dex_file,
info.target_method.dex_method_index));
}
+ for (const auto& pair : dex_cache_arrays_base_labels_) {
+ HArmDexCacheArraysBase* base = pair.first;
+ const DexCacheArraysBaseLabels* labels = &pair.second;
+ const DexFile& dex_file = base->GetDexFile();
+ size_t base_element_offset = base->GetElementOffset();
+ DCHECK(labels->add_pc_label.IsBound());
+ uint32_t add_pc_offset = dchecked_integral_cast<uint32_t>(labels->add_pc_label.Position());
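+ // Both the MOVW and MOVT patches use add_pc_offset as the anchor from which the
+ // linker computes the displacement to the dex cache arrays element.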
+ // Add MOVW patch.
+ DCHECK(labels->movw_label.IsBound());
+ uint32_t movw_offset = dchecked_integral_cast<uint32_t>(labels->movw_label.Position());
+ linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(movw_offset,
+ &dex_file,
+ add_pc_offset,
+ base_element_offset));
+ // Add MOVT patch.
+ DCHECK(labels->movt_label.IsBound());
+ uint32_t movt_offset = dchecked_integral_cast<uint32_t>(labels->movt_label.Position());
+ linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(movt_offset,
+ &dex_file,
+ add_pc_offset,
+ base_element_offset));
+ }
}
Literal* CodeGeneratorARM::DeduplicateMethodLiteral(MethodReference target_method,
@@ -6107,6 +6163,23 @@
}
}
+void LocationsBuilderARM::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(base);
+ locations->SetOut(Location::RequiresRegister());
+ codegen_->AddDexCacheArraysBase(base);
+}
+
+void InstructionCodeGeneratorARM::VisitArmDexCacheArraysBase(HArmDexCacheArraysBase* base) {
+ Register base_reg = base->GetLocations()->Out().AsRegister<Register>();
+ CodeGeneratorARM::DexCacheArraysBaseLabels* labels = codegen_->GetDexCacheArraysBaseLabels(base);
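+ // Emit MOVW/MOVT with placeholder zero immediates; EmitLinkerPatches() records the
+ // label positions so the linker can later fill in the offset from the ADD's PC to
+ // the dex cache arrays base element.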
+ __ BindTrackedLabel(&labels->movw_label);
+ __ movw(base_reg, 0u);
+ __ BindTrackedLabel(&labels->movt_label);
+ __ movt(base_reg, 0u);
+ __ BindTrackedLabel(&labels->add_pc_label);
+ __ add(base_reg, base_reg, ShifterOperand(PC));
+}
+
void CodeGeneratorARM::MoveFromReturnRegister(Location trg, Primitive::Type type) {
if (!trg.IsValid()) {
DCHECK(type == Primitive::kPrimVoid);
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 89de4f8..193add2 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -373,6 +373,31 @@
void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
+ // The PC-relative base address is loaded with three instructions, MOVW+MOVT
+ // to load the offset to base_reg and then ADD base_reg, PC. The offset is
+ // calculated from the ADD's effective PC, i.e. PC+4 on Thumb2. Though we
+ // currently emit these 3 instructions together, instruction scheduling could
+ // split this sequence apart, so we keep separate labels for each of them.
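+ // For illustration, once the linker has filled in the immediates the sequence
+ // might look like this (values made up):
+ //     movw base_reg, #0x5678
+ //     movt base_reg, #0x1234
+ //     add  base_reg, base_reg, pc   @ base_reg = (ADD's PC + 4) + 0x12345678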
+ struct DexCacheArraysBaseLabels {
+ DexCacheArraysBaseLabels() = default;
+ DexCacheArraysBaseLabels(DexCacheArraysBaseLabels&& other) = default;
+
+ Label movw_label;
+ Label movt_label;
+ Label add_pc_label;
+ };
+
+ void AddDexCacheArraysBase(HArmDexCacheArraysBase* base) {
+ DexCacheArraysBaseLabels labels;
+ dex_cache_arrays_base_labels_.Put(base, std::move(labels));
+ }
+
+ DexCacheArraysBaseLabels* GetDexCacheArraysBaseLabels(HArmDexCacheArraysBase* base) {
+ auto it = dex_cache_arrays_base_labels_.find(base);
+ DCHECK(it != dex_cache_arrays_base_labels_.end());
+ return &it->second;
+ }
+
// Generate a read barrier for a heap reference within `instruction`.
//
// A read barrier for an object reference read from the heap is
@@ -419,7 +444,12 @@
void GenerateReadBarrierForRoot(HInstruction* instruction, Location out, Location root);
private:
+ Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
+
using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>;
+ using DexCacheArraysBaseToLabelsMap = ArenaSafeMap<HArmDexCacheArraysBase*,
+ DexCacheArraysBaseLabels,
+ std::less<HArmDexCacheArraysBase*>>;
Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
Literal* DeduplicateMethodAddressLiteral(MethodReference target_method);
@@ -441,6 +471,8 @@
// Using ArenaDeque<> which retains element addresses on push/emplace_back().
ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
+ DexCacheArraysBaseToLabelsMap dex_cache_arrays_base_labels_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM);
};
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1fc09a8..a0d31da 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1929,8 +1929,7 @@
// For PC-relative dex cache the invoke has an extra input, the PC-relative address base.
if (invoke->HasPcRelativeDexCache()) {
- invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(),
- Location::RequiresRegister());
+ invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
}
if (codegen_->IsBaseline()) {
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.cc b/compiler/optimizing/dex_cache_array_fixups_arm.cc
new file mode 100644
index 0000000..7f5f0e5
--- /dev/null
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_cache_array_fixups_arm.h"
+
+#include "base/arena_containers.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
+
+namespace art {
+namespace arm {
+
+/**
+ * Finds instructions that need the dex cache arrays base as an input.
+ */
+class DexCacheArrayFixupsVisitor : public HGraphVisitor {
+ public:
+ explicit DexCacheArrayFixupsVisitor(HGraph* graph)
+ : HGraphVisitor(graph),
+ dex_cache_array_bases_(std::less<const DexFile*>(),
+ // Attribute memory use to code generator.
+ graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {}
+
+ private:
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ // If this is an invoke with PC-relative access to the dex cache methods array,
+ // we need to add the dex cache arrays base as the special input.
+ if (invoke->HasPcRelativeDexCache()) {
+ // Initialize base for target method dex file if needed.
+ MethodReference target_method = invoke->GetTargetMethod();
+ HArmDexCacheArraysBase* base = GetOrCreateDexCacheArrayBase(invoke, *target_method.dex_file);
+ // Update the element offset in base.
+ DexCacheArraysLayout layout(kArmPointerSize, target_method.dex_file);
+ base->UpdateElementOffset(layout.MethodOffset(target_method.dex_method_index));
+ // Add the dex cache arrays base as the special input of the invoke.
+ DCHECK(!invoke->HasCurrentMethodInput());
+ invoke->AddSpecialInput(base);
+ }
+ }
+
+ HArmDexCacheArraysBase* GetOrCreateDexCacheArrayBase(HInstruction* user,
+ const DexFile& dex_file) {
+ // Ensure we create at most one dex cache arrays base per dex file.
+ auto lb = dex_cache_array_bases_.lower_bound(&dex_file);
+ if (lb != dex_cache_array_bases_.end() &&
+ !dex_cache_array_bases_.key_comp()(&dex_file, lb->first)) {
+ return lb->second;
+ }
+
+ HGraph* graph = GetGraph();
+ HBasicBlock* entry = graph->GetEntryBlock();
+ HArmDexCacheArraysBase* base = new (graph->GetArena()) HArmDexCacheArraysBase(dex_file);
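+ // Insert the base in the entry block so that it dominates its users: right before
+ // the user if the user is already in the entry block, otherwise before the entry
+ // block's final control-flow instruction.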
+ HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
+ entry->InsertInstructionBefore(base, insert_pos);
+ dex_cache_array_bases_.PutBefore(lb, &dex_file, base);
+ return base;
+ }
+
+ using DexCacheArraysBaseMap =
+ ArenaSafeMap<const DexFile*, HArmDexCacheArraysBase*, std::less<const DexFile*>>;
+ DexCacheArraysBaseMap dex_cache_array_bases_;
+};
+
+void DexCacheArrayFixups::Run() {
+ DexCacheArrayFixupsVisitor visitor(graph_);
+ visitor.VisitInsertionOrder();
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.h b/compiler/optimizing/dex_cache_array_fixups_arm.h
new file mode 100644
index 0000000..015f910
--- /dev/null
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_DEX_CACHE_ARRAY_FIXUPS_ARM_H_
+#define ART_COMPILER_OPTIMIZING_DEX_CACHE_ARRAY_FIXUPS_ARM_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+namespace arm {
+
+class DexCacheArrayFixups : public HOptimization {
+ public:
+ DexCacheArrayFixups(HGraph* graph, OptimizingCompilerStats* stats)
+ : HOptimization(graph, "dex_cache_array_fixups_arm", stats) {}
+
+ void Run() OVERRIDE;
+};
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_DEX_CACHE_ARRAY_FIXUPS_ARM_H_
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d3f30cb..0bf7734 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1095,7 +1095,12 @@
M(UShr, BinaryOperation) \
M(Xor, BinaryOperation) \
+#ifndef ART_ENABLE_CODEGEN_arm
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M) \
+ M(ArmDexCacheArraysBase, Instruction)
+#endif
#ifndef ART_ENABLE_CODEGEN_arm64
#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
@@ -5565,6 +5570,9 @@
} // namespace art
+#ifdef ART_ENABLE_CODEGEN_arm
+#include "nodes_arm.h"
+#endif
#ifdef ART_ENABLE_CODEGEN_arm64
#include "nodes_arm64.h"
#endif
diff --git a/compiler/optimizing/nodes_arm.h b/compiler/optimizing/nodes_arm.h
new file mode 100644
index 0000000..6a1dbb9
--- /dev/null
+++ b/compiler/optimizing/nodes_arm.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_ARM_H_
+#define ART_COMPILER_OPTIMIZING_NODES_ARM_H_
+
+namespace art {
+
+class HArmDexCacheArraysBase : public HExpression<0> {
+ public:
+ explicit HArmDexCacheArraysBase(const DexFile& dex_file)
+ : HExpression(Primitive::kPrimInt, SideEffects::None(), kNoDexPc),
+ dex_file_(&dex_file),
+ element_offset_(static_cast<size_t>(-1)) { }
+
+ void UpdateElementOffset(size_t element_offset) {
+ // Use the lowest offset from the requested elements so that all offsets from
+ // this base are non-negative because our assemblers emit negative-offset loads
+ // as a sequence of two or more instructions. (However, positive offsets beyond
+ // 4KiB also require two or more instructions, so this simple heuristic could
+ // be improved for cases where there is a dense cluster of elements far from
+ // the lowest offset. This is expected to be rare enough though, so we choose
+ // not to spend compile time on elaborate calculations.)
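+ // For example, if a method uses elements at offsets 40 and 104, the base settles
+ // at 40 and the corresponding loads use offsets 0 and 64 from the base register.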
+ element_offset_ = std::min(element_offset_, element_offset);
+ }
+
+ const DexFile& GetDexFile() const {
+ return *dex_file_;
+ }
+
+ size_t GetElementOffset() const {
+ return element_offset_;
+ }
+
+ DECLARE_INSTRUCTION(ArmDexCacheArraysBase);
+
+ private:
+ const DexFile* dex_file_;
+ size_t element_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(HArmDexCacheArraysBase);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_NODES_ARM_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index dec08d8..3eb3279 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -20,6 +20,10 @@
#include <stdint.h>
+#ifdef ART_ENABLE_CODEGEN_arm
+#include "dex_cache_array_fixups_arm.h"
+#endif
+
#ifdef ART_ENABLE_CODEGEN_arm64
#include "instruction_simplifier_arm64.h"
#endif
@@ -434,6 +438,17 @@
PassObserver* pass_observer) {
ArenaAllocator* arena = graph->GetArena();
switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
+ case kThumb2:
+ case kArm: {
+ arm::DexCacheArrayFixups* fixups = new (arena) arm::DexCacheArrayFixups(graph, stats);
+ HOptimization* arm_optimizations[] = {
+ fixups
+ };
+ RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
+ break;
+ }
+#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
arm64::InstructionSimplifierArm64* simplifier =