Implement invoke-virtual in the optimizing compiler.
Also refactor the 004 tests so that they work with both Quick and
Optimizing.
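
For context, all three backends (ARM, x86, x86-64) emit the same dispatch
shape for HInvokeVirtual: load the receiver's class, index into the class's
embedded vtable, and call through the method's quick-compiled entry point.
The C++ sketch below spells out that address walk; the constant values and
the ResolveVirtualTarget helper are illustrative placeholders, not ART's
real offsets or API:

    #include <cstdint>

    // Placeholder offsets; the generated code instead uses
    // mirror::Object::ClassOffset(), mirror::Class::EmbeddedVTableOffset(),
    // sizeof(mirror::Class::VTableEntry) and
    // mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().
    static constexpr uint32_t kClassOffset = 0;
    static constexpr uint32_t kEmbeddedVTableOffset = 256;
    static constexpr uint32_t kVTableEntrySize = 4;
    static constexpr uint32_t kEntryPointOffset = 40;

    // Returns the code address the emitted `blx LR` (ARM) or `call` (x86)
    // jumps to. Heap references are 32-bit, hence the uint32_t loads.
    inline uintptr_t ResolveVirtualTarget(uintptr_t receiver,
                                          uint32_t vtable_index) {
      // temp = receiver->GetClass();
      uintptr_t klass = *reinterpret_cast<const uint32_t*>(receiver + kClassOffset);
      // temp = klass->GetMethodAt(method_offset);
      uintptr_t method = *reinterpret_cast<const uint32_t*>(
          klass + kEmbeddedVTableOffset + vtable_index * kVTableEntrySize);
      // call method->GetEntryPoint();
      return *reinterpret_cast<const uintptr_t*>(method + kEntryPointOffset);
    }

The vtable index itself is computed at build time via
CompilerDriver::ComputeInvokeInfo, so dispatch at run time is just the three
dependent loads above.
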
Change-Id: I87e275cb0ae0258fc3bb32b612140000b1d2adf8
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a03588f..33b00d2 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -331,18 +331,61 @@
bool is_range,
uint32_t* args,
uint32_t register_index) {
+ Instruction::Code opcode = instruction.Opcode();
+ InvokeType invoke_type;
+ switch (opcode) {
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_STATIC_RANGE:
+ invoke_type = kStatic;
+ break;
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ invoke_type = kDirect;
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ invoke_type = kVirtual;
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ invoke_type = kInterface;
+ break;
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_SUPER_RANGE:
+ invoke_type = kSuper;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke op: " << opcode;
+ return false;
+ }
+
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(method_id.proto_idx_);
const char* descriptor = dex_file_->StringDataByIdx(proto_id.shorty_idx_);
Primitive::Type return_type = Primitive::GetType(descriptor[0]);
- bool is_instance_call =
- instruction.Opcode() != Instruction::INVOKE_STATIC
- && instruction.Opcode() != Instruction::INVOKE_STATIC_RANGE;
+ bool is_instance_call = invoke_type != kStatic;
const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
- // Treat invoke-direct like static calls for now.
- HInvoke* invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ HInvoke* invoke = nullptr;
+ if (invoke_type == kVirtual) {
+ MethodReference target_method(dex_file_, method_idx);
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ int vtable_index;
+ // TODO: Add devirtualization support.
+ compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
+ &invoke_type, &target_method, &vtable_index,
+ &direct_code, &direct_method);
+ if (vtable_index == -1) {
+ return false;
+ }
+ invoke = new (arena_) HInvokeVirtual(
+ arena_, number_of_arguments, return_type, dex_offset, vtable_index);
+ } else {
+ // Treat invoke-direct like static calls for now.
+ invoke = new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ }
size_t start_index = 0;
Temporaries temps(graph_, is_instance_call ? 1 : 0);
@@ -620,7 +663,8 @@
}
case Instruction::INVOKE_STATIC:
- case Instruction::INVOKE_DIRECT: {
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_VIRTUAL: {
uint32_t method_idx = instruction.VRegB_35c();
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
@@ -632,7 +676,8 @@
}
case Instruction::INVOKE_STATIC_RANGE:
- case Instruction::INVOKE_DIRECT_RANGE: {
+ case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
uint32_t method_idx = instruction.VRegB_3rc();
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9903092..ad62279 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -20,6 +20,7 @@
#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
@@ -818,6 +819,47 @@
}
void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
+ __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
+}
+
+void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
+ uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+ size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+ invoke->GetIndexInDexCache() * kArmWordSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ ldr(temp, Address(temp, index_in_cache));
+ // LR = temp[offset_of_quick_compiled_code]
+ __ ldr(LR, Address(temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ // LR()
+ __ blx(LR);
+
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(ArmCoreLocation(R0));
@@ -852,37 +894,30 @@
}
}
-void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
- __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
-}
-void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
- invoke->GetIndexInDexCache() * kArmWordSize;
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ ldr(temp, Address(temp, index_in_cache));
- // LR = temp[offset_of_quick_compiled_code]
- __ ldr(LR, Address(temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
- // LR()
+ uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ ldr(temp, Address(SP, receiver.GetStackIndex()));
+ __ ldr(temp, Address(temp, class_offset));
+ } else {
+ __ ldr(temp, Address(receiver.AsArm().AsCoreRegister(), class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ __ ldr(temp, Address(temp, method_offset));
+ // LR = temp->GetEntryPoint();
+ __ ldr(LR, Address(temp, entry_point));
+ // LR();
__ blx(LR);
-
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARM::VisitAdd(HAdd* add) {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 660294b..2480960 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -93,6 +93,8 @@
#undef DECLARE_VISIT_INSTRUCTION
+ void HandleInvoke(HInvoke* invoke);
+
private:
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3dd9b37..3383cb2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -20,6 +20,7 @@
#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"
@@ -763,6 +764,40 @@
}
void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
+ uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+ size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+ invoke->GetIndexInDexCache() * kX86WordSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, index_in_cache));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(X86CpuLocation(EAX));
@@ -799,26 +834,23 @@
invoke->SetLocations(locations);
}
-void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
- invoke->GetIndexInDexCache() * kX86WordSize;
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, index_in_cache));
- // (temp + offset_of_quick_compiled_code)()
+ uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ movl(temp, Address(ESP, receiver.GetStackIndex()));
+ __ movl(temp, Address(temp, class_offset));
+ } else {
+ __ movl(temp, Address(receiver.AsX86().AsCpuRegister(), class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ movl(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7c50204..f1be0ad 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -94,6 +94,8 @@
#undef DECLARE_VISIT_INSTRUCTION
+ void HandleInvoke(HInvoke* invoke);
+
private:
CodeGeneratorX86* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2f352e0..ca03af8 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -20,6 +20,7 @@
#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "mirror/object_reference.h"
#include "thread.h"
#include "utils/assembler.h"
@@ -709,12 +710,46 @@
}
void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
+ uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+ size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
+ invoke->GetIndexInDexCache() * heap_reference_size;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, index_in_cache));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(X86_64CpuLocation(RDI));
InvokeDexCallingConventionVisitor calling_convention_visitor;
for (size_t i = 0; i < invoke->InputCount(); ++i) {
HInstruction* input = invoke->InputAt(i);
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
@@ -740,26 +775,23 @@
}
}
-void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
- invoke->GetIndexInDexCache() * heap_reference_size;
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, index_in_cache));
- // (temp + offset_of_quick_compiled_code)()
+ size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ size_t class_offset = mirror::Object::ClassOffset().SizeValue();
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+ __ movl(temp, Address(temp, class_offset));
+ } else {
+ __ movl(temp, Address(receiver.AsX86_64().AsCpuRegister(), class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ movl(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 44552ea..78b60fe 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -91,6 +91,8 @@
#undef DECLARE_VISIT_INSTRUCTION
+ void HandleInvoke(HInvoke* invoke);
+
private:
CodeGeneratorX86_64* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index ed6dd93..d6dfeae 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -422,6 +422,7 @@
M(If) \
M(IntConstant) \
M(InvokeStatic) \
+ M(InvokeVirtual) \
M(LoadLocal) \
M(Local) \
M(LongConstant) \
@@ -1272,6 +1273,26 @@
DISALLOW_COPY_AND_ASSIGN(HInvokeStatic);
};
+class HInvokeVirtual : public HInvoke {
+ public:
+ HInvokeVirtual(ArenaAllocator* arena,
+ uint32_t number_of_arguments,
+ Primitive::Type return_type,
+ uint32_t dex_pc,
+ uint32_t vtable_index)
+ : HInvoke(arena, number_of_arguments, return_type, dex_pc),
+ vtable_index_(vtable_index) {}
+
+ uint32_t GetVTableIndex() const { return vtable_index_; }
+
+ DECLARE_INSTRUCTION(InvokeVirtual);
+
+ private:
+ const uint32_t vtable_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
+};
+
class HNewInstance : public HExpression<0> {
public:
HNewInstance(uint32_t dex_pc, uint16_t type_index)