Move mirror::ArtMethod to native
Optimizing and Quick tests are passing, and devices boot.
TODO: Test and fix bugs in mips64.
Saves 16 bytes for most ArtMethods and gives a 7.5MB reduction in
system PSS. Some of the savings come from removing the virtual-methods
and direct-methods object arrays.
Bug: 19264997
Change-Id: I622469a0cfa0e7082a2119f3d6a9491eb61e3f3d
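
For context on where the pointer-size dependence shows up in the diff below: dex-cache arrays of ArtMethod*/ArtField* now hold raw native pointers instead of heap references, so offsets into them scale with the target pointer size (see the new CodeGenerator::GetCachePointerOffset in this patch). A minimal standalone sketch of that arithmetic, with data-start offsets chosen purely for illustration:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

// Stand-in for mirror::Array::DataOffset(component_size) + component_size * index.
// The data_offset values passed below are illustrative, not ART's real header sizes.
size_t GetCachePointerOffset(uint32_t index, size_t pointer_size, size_t data_offset) {
  // Each entry is a raw ArtMethod*/ArtField*, so it occupies pointer_size bytes.
  return data_offset + pointer_size * index;
}

int main() {
  std::cout << GetCachePointerOffset(3, 4, 12) << "\n";  // 32-bit target: 24
  std::cout << GetCachePointerOffset(3, 8, 16) << "\n";  // 64-bit target: 40
  return 0;
}
```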
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 411a5aa..f98029d 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -663,9 +663,8 @@
*dex_compilation_unit_->GetDexFile())));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(dex_compilation_unit_->GetClassLoader())));
- mirror::ArtMethod* resolved_method = compiler_driver_->ResolveMethod(
- soa, dex_cache, class_loader, dex_compilation_unit_, method_idx,
- optimized_invoke_type);
+ ArtMethod* resolved_method = compiler_driver_->ResolveMethod(
+ soa, dex_cache, class_loader, dex_compilation_unit_, method_idx, optimized_invoke_type);
if (resolved_method == nullptr) {
MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedMethod);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d71266d..c106d30 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -114,6 +114,11 @@
return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
}
+size_t CodeGenerator::GetCachePointerOffset(uint32_t index) {
+ auto pointer_size = InstructionSetPointerSize(GetInstructionSet());
+ return mirror::Array::DataOffset(pointer_size).Uint32Value() + pointer_size * index;
+}
+
void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
Initialize();
if (!is_leaf) {
@@ -270,7 +275,8 @@
uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
if (reg_number >= number_of_locals) {
// Local is a parameter of the method. It is stored in the caller's frame.
- return GetFrameSize() + kVRegSize // ART method
+ // TODO: Share this logic with StackVisitor::GetVRegOffsetFromQuickCode.
+ return GetFrameSize() + InstructionSetPointerSize(GetInstructionSet()) // ART method
+ (reg_number - number_of_locals) * kVRegSize;
} else {
// Local is a temporary in this method. It is stored in this method's frame.
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 740beab..3012098 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -152,7 +152,7 @@
size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
// Note that this follows the current calling convention.
return GetFrameSize()
- + kVRegSize // Art method
+ + InstructionSetPointerSize(GetInstructionSet()) // Art method
+ parameter->GetIndex() * kVRegSize;
}
@@ -273,6 +273,8 @@
// Note: this method assumes we always have the same pointer size, regardless
// of the architecture.
static size_t GetCacheOffset(uint32_t index);
+ // Pointer variant for ArtMethod and ArtField arrays.
+ size_t GetCachePointerOffset(uint32_t index);
void EmitParallelMoves(Location from1,
Location to1,
@@ -477,11 +479,13 @@
CallingConvention(const C* registers,
size_t number_of_registers,
const F* fpu_registers,
- size_t number_of_fpu_registers)
+ size_t number_of_fpu_registers,
+ size_t pointer_size)
: registers_(registers),
number_of_registers_(number_of_registers),
fpu_registers_(fpu_registers),
- number_of_fpu_registers_(number_of_fpu_registers) {}
+ number_of_fpu_registers_(number_of_fpu_registers),
+ pointer_size_(pointer_size) {}
size_t GetNumberOfRegisters() const { return number_of_registers_; }
size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; }
@@ -498,8 +502,8 @@
size_t GetStackOffsetOf(size_t index) const {
// We still reserve the space for parameters passed by registers.
- // Add one for the method pointer.
- return (index + 1) * kVRegSize;
+ // Add space for the method pointer.
+ return pointer_size_ + index * kVRegSize;
}
private:
@@ -507,6 +511,7 @@
const size_t number_of_registers_;
const F* fpu_registers_;
const size_t number_of_fpu_registers_;
+ const size_t pointer_size_;
DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
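
To see what the new pointer_size_ field changes in practice: GetStackOffsetOf used to reserve a single vreg slot (4 bytes) for the ArtMethod* ahead of the stack arguments and now reserves a pointer-sized slot, so on 64-bit targets every stack argument moves up by 4 bytes. A small before/after sketch of the arithmetic, with simplified names:

```cpp
#include <cstddef>

constexpr size_t kVRegSize = 4;  // Dex virtual registers are 32-bit slots.

// Before: one vreg slot reserved for the method pointer on every target.
constexpr size_t OldStackOffsetOf(size_t index) {
  return (index + 1) * kVRegSize;
}

// After: the method slot is pointer-sized; arguments stay vreg-sized.
constexpr size_t NewStackOffsetOf(size_t index, size_t pointer_size) {
  return pointer_size + index * kVRegSize;
}

static_assert(OldStackOffsetOf(0) == NewStackOffsetOf(0, 4), "unchanged on 32-bit targets");
static_assert(NewStackOffsetOf(0, 8) == 8, "64-bit: first argument follows an 8-byte slot");
static_assert(NewStackOffsetOf(2, 8) == 16, "64-bit: later arguments shift up by 4 bytes");

int main() { return 0; }
```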
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index bd1f134..7f0be05 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -17,14 +17,14 @@
#include "code_generator_arm.h"
#include "arch/arm/instruction_set_features_arm.h"
+#include "art_method.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_arm.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "thread.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
@@ -1314,8 +1314,8 @@
}
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
- invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kArmPointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1328,7 +1328,7 @@
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
@@ -1348,8 +1348,8 @@
void InstructionCodeGeneratorARM::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
- (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kArmPointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1367,7 +1367,7 @@
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
- uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmWordSize).Int32Value();
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
@@ -3977,13 +3977,13 @@
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
__ LoadFromOffset(
- kLoadWord, out, current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+ kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
} else {
DCHECK(cls->CanCallRuntime());
__ LoadFromOffset(kLoadWord,
out,
current_method,
- mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
+ ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
@@ -4043,7 +4043,7 @@
Register out = locations->Out().AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
__ LoadFromOffset(
- kLoadWord, out, current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+ kLoadWord, out, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
__ cmp(out, ShifterOperand(0));
@@ -4268,7 +4268,7 @@
__ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
// LR = temp[offset_of_quick_compiled_code]
__ LoadFromOffset(kLoadWord, LR, temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kArmWordSize).Int32Value());
// LR()
__ blx(LR);
@@ -4278,14 +4278,13 @@
if (!invoke->IsRecursive()) {
// temp = temp->dex_cache_resolved_methods_;
__ LoadFromOffset(
- kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+ kLoadWord, temp, temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
// temp = temp[index_in_cache]
__ LoadFromOffset(
kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
// LR = temp[offset_of_quick_compiled_code]
- __ LoadFromOffset(kLoadWord, LR, temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArmWordSize).Int32Value());
+ __ LoadFromOffset(kLoadWord, LR, temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArmWordSize).Int32Value());
// LR()
__ blx(LR);
} else {
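
The ARM dispatch changes above all follow the same pattern: embedded vtable and IMT entries, like dex-cache entries, are now raw ArtMethod* values, so their offsets are computed from the index and the target pointer size instead of from a fixed heap-reference-sized table entry (the removed kHeapRefSize constant in the arm64 file measured the old 4-byte entries). A hedged sketch of what the new mirror::Class helpers amount to; the table start offsets here are placeholders, not the real class layout:

```cpp
#include <cstddef>
#include <cstdint>

// Old form: EmbeddedVTableOffset() + index * sizeof(VTableEntry), where each entry
// held a 32-bit heap reference to a managed ArtMethod object.
size_t OldVTableEntryOffset(uint32_t index, size_t vtable_start) {
  constexpr size_t kHeapReferenceSize = 4;
  return vtable_start + index * kHeapReferenceSize;
}

// New form (modeled after EmbeddedVTableEntryOffset(index, pointer_size)): entries
// are native ArtMethod*, so the stride is the target pointer size.
size_t NewVTableEntryOffset(uint32_t index, size_t pointer_size, size_t vtable_start) {
  return vtable_start + index * pointer_size;
}

int main() {
  // On 64-bit targets each vtable slot now spans 8 bytes instead of 4.
  return (NewVTableEntryOffset(2, 8, 0) == 16 && OldVTableEntryOffset(2, 0) == 8) ? 0 : 1;
}
```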
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 071bbee..d649cbf 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -54,7 +54,8 @@
: CallingConvention(kRuntimeParameterCoreRegisters,
kRuntimeParameterCoreRegistersLength,
kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
+ kRuntimeParameterFpuRegistersLength,
+ kArmPointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
@@ -72,7 +73,8 @@
: CallingConvention(kParameterCoreRegisters,
kParameterCoreRegistersLength,
kParameterFpuRegisters,
- kParameterFpuRegistersLength) {}
+ kParameterFpuRegistersLength,
+ kArmPointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index cf5a8fb..fbe26b0 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -17,6 +17,7 @@
#include "code_generator_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "art_method.h"
#include "code_generator_utils.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
@@ -25,8 +26,7 @@
#include "intrinsics.h"
#include "intrinsics_arm64.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
@@ -67,7 +67,6 @@
using helpers::ARM64EncodableConstantOrRegister;
using helpers::ArtVixlRegCodeCoherentForRegSet;
-static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;
inline Condition ARM64Condition(IfCondition cond) {
@@ -1069,7 +1068,7 @@
void CodeGeneratorARM64::LoadCurrentMethod(vixl::Register current_method) {
DCHECK(RequiresCurrentMethod());
- DCHECK(current_method.IsW());
+ CHECK(current_method.IsX());
__ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
}
@@ -2186,12 +2185,12 @@
void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
- Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
- uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
- (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
Location receiver = invoke->GetLocations()->InAt(0);
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
// The register ip1 is required to be used for the hidden argument in
// art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
@@ -2203,16 +2202,16 @@
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
- __ Ldr(temp, StackOperandFrom(receiver));
- __ Ldr(temp, HeapOperand(temp, class_offset));
+ __ Ldr(temp.W(), StackOperandFrom(receiver));
+ __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
} else {
- __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
- __ Ldr(temp, HeapOperand(temp, method_offset));
+ __ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
- __ Ldr(lr, HeapOperand(temp, entry_point));
+ __ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
// lr();
__ Blr(lr);
DCHECK(!codegen_->IsLeafMethod());
@@ -2253,8 +2252,7 @@
void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Register temp) {
// Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
DCHECK(temp.Is(kArtMethodRegister));
- size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
- invoke->GetDexMethodIndex() * kHeapRefSize;
+ size_t index_in_cache = GetCachePointerOffset(invoke->GetDexMethodIndex());
// TODO: Implement all kinds of calls:
// 1) boot -> boot
@@ -2265,23 +2263,24 @@
if (invoke->IsStringInit()) {
// temp = thread->string_init_entrypoint
- __ Ldr(temp, HeapOperand(tr, invoke->GetStringInitOffset()));
+ __ Ldr(temp.X(), MemOperand(tr, invoke->GetStringInitOffset()));
// LR = temp->entry_point_from_quick_compiled_code_;
- __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArm64WordSize)));
+ __ Ldr(lr, MemOperand(
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
// lr()
__ Blr(lr);
} else {
// temp = method;
- LoadCurrentMethod(temp);
+ LoadCurrentMethod(temp.X());
if (!invoke->IsRecursive()) {
// temp = temp->dex_cache_resolved_methods_;
- __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
+ __ Ldr(temp.W(), MemOperand(temp.X(),
+ ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
// temp = temp[index_in_cache];
- __ Ldr(temp, HeapOperand(temp, index_in_cache));
+ __ Ldr(temp.X(), MemOperand(temp, index_in_cache));
// lr = temp->entry_point_from_quick_compiled_code_;
- __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- kArm64WordSize)));
+ __ Ldr(lr, MemOperand(temp.X(), ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ kArm64WordSize).Int32Value()));
// lr();
__ Blr(lr);
} else {
@@ -2302,7 +2301,7 @@
}
BlockPoolsScope block_pools(GetVIXLAssembler());
- Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
codegen_->GenerateStaticOrDirectCall(invoke, temp);
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
@@ -2314,27 +2313,27 @@
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
- Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
- size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
- invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
Offset class_offset = mirror::Object::ClassOffset();
- Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
+ Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
BlockPoolsScope block_pools(GetVIXLAssembler());
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
- __ Ldr(temp, MemOperand(sp, receiver.GetStackIndex()));
- __ Ldr(temp, HeapOperand(temp, class_offset));
+ __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
+ __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
} else {
DCHECK(receiver.IsRegister());
- __ Ldr(temp, HeapOperandFrom(receiver, class_offset));
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
- __ Ldr(temp, HeapOperand(temp, method_offset));
+ __ Ldr(temp, MemOperand(temp, method_offset));
// lr = temp->GetEntryPoint();
- __ Ldr(lr, HeapOperand(temp, entry_point.SizeValue()));
+ __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
// lr();
__ Blr(lr);
DCHECK(!codegen_->IsLeafMethod());
@@ -2355,10 +2354,10 @@
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DeclaringClassOffset()));
+ __ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
- __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
+ __ Ldr(out, MemOperand(current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
@@ -2407,7 +2406,7 @@
Register out = OutputRegister(load);
Register current_method = InputRegisterAt(load, 0);
- __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DeclaringClassOffset()));
+ __ Ldr(out, MemOperand(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
__ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
__ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ Cbz(out, slow_path->GetEntryLabel());
@@ -2535,7 +2534,7 @@
locations->SetOut(LocationFrom(x0));
locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
- void*, uint32_t, int32_t, mirror::ArtMethod*>();
+ void*, uint32_t, int32_t, ArtMethod*>();
}
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
@@ -2543,17 +2542,16 @@
InvokeRuntimeCallingConvention calling_convention;
Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
DCHECK(type_index.Is(w0));
- Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
- DCHECK(current_method.Is(w2));
- codegen_->LoadCurrentMethod(current_method);
+ Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimLong);
+ DCHECK(current_method.Is(x2));
+ codegen_->LoadCurrentMethod(current_method.X());
__ Mov(type_index, instruction->GetTypeIndex());
codegen_->InvokeRuntime(
GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr);
- CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck,
- void*, uint32_t, int32_t, mirror::ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -2563,7 +2561,7 @@
locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}
void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -2572,14 +2570,14 @@
DCHECK(type_index.Is(w0));
Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
DCHECK(current_method.Is(w1));
- codegen_->LoadCurrentMethod(current_method);
+ codegen_->LoadCurrentMethod(current_method.X());
__ Mov(type_index, instruction->GetTypeIndex());
codegen_->InvokeRuntime(
GetThreadOffset<kArm64WordSize>(instruction->GetEntrypoint()).Int32Value(),
instruction,
instruction->GetDexPc(),
nullptr);
- CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, mirror::ArtMethod*>();
+ CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
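
The ARM64 changes above are mostly about operand width: heap references (for example the receiver's class pointer) stay 32-bit and keep using W registers and HeapOperand, while anything holding an ArtMethod* (the current-method slot, dex-cache method entries, vtable/IMT entries) is now a native pointer loaded through X registers and plain MemOperand. A toy sketch of that distinction, with made-up names:

```cpp
#include <cstddef>

// Hypothetical helper mirroring the rule the ARM64 code generator now follows.
enum class ValueKind { kHeapReference, kNativePointer };

// Heap references are always 4 bytes (compressed pointers into the managed heap);
// native pointers such as ArtMethod* follow the target ABI width.
size_t LoadWidthInBytes(ValueKind kind, size_t pointer_size) {
  return kind == ValueKind::kHeapReference ? 4u : pointer_size;
}

int main() {
  // On arm64 this is the difference between "ldr wN, [...]" and "ldr xN, [...]".
  return LoadWidthInBytes(ValueKind::kNativePointer, 8) == 8 ? 0 : 1;
}
```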
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index ab793a5..7a502e0 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -45,7 +45,7 @@
static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
const vixl::Register tr = vixl::x19; // Thread Register
-static const vixl::Register kArtMethodRegister = vixl::w0; // Method register on invoke.
+static const vixl::Register kArtMethodRegister = vixl::x0; // Method register on invoke.
const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
const vixl::CPURegList vixl_reserved_fp_registers(vixl::d31);
@@ -97,7 +97,8 @@
: CallingConvention(kRuntimeParameterCoreRegisters,
kRuntimeParameterCoreRegistersLength,
kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
+ kRuntimeParameterFpuRegistersLength,
+ kArm64PointerSize) {}
Location GetReturnLocation(Primitive::Type return_type);
@@ -111,7 +112,8 @@
: CallingConvention(kParameterCoreRegisters,
kParameterCoreRegistersLength,
kParameterFPRegisters,
- kParameterFPRegistersLength) {}
+ kParameterFPRegistersLength,
+ kArm64PointerSize) {}
Location GetReturnLocation(Primitive::Type return_type) {
return ARM64ReturnLocation(return_type);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 81c3526..8678428 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -16,6 +16,7 @@
#include "code_generator_x86.h"
+#include "art_method.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -23,8 +24,7 @@
#include "intrinsics.h"
#include "intrinsics_x86.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"
@@ -1276,8 +1276,8 @@
void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
- invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kX86PointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1293,7 +1293,7 @@
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
__ call(Address(
- temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
@@ -1308,8 +1308,8 @@
void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
- (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kX86PointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -1329,7 +1329,7 @@
// temp = temp->GetImtEntryAt(method_offset);
__ movl(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86WordSize).Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
@@ -3219,18 +3219,19 @@
__ fs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(
- temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
} else {
// temp = method;
LoadCurrentMethod(temp);
if (!invoke->IsRecursive()) {
// temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
// temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ __ movl(temp, Address(temp,
+ CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
// (temp + offset_of_quick_compiled_code)()
__ call(Address(temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+ ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
} else {
__ call(GetFrameEntryLabel());
}
@@ -4302,11 +4303,11 @@
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
__ movl(out, Address(
- current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
@@ -4363,7 +4364,7 @@
LocationSummary* locations = load->GetLocations();
Register out = locations->Out().AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
- __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ testl(out, out);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 28766d8..6988803 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -52,7 +52,8 @@
: CallingConvention(kRuntimeParameterCoreRegisters,
kRuntimeParameterCoreRegistersLength,
kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
+ kRuntimeParameterFpuRegistersLength,
+ kX86PointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
@@ -64,7 +65,8 @@
kParameterCoreRegisters,
kParameterCoreRegistersLength,
kParameterFpuRegisters,
- kParameterFpuRegistersLength) {}
+ kParameterFpuRegistersLength,
+ kX86PointerSize) {}
RegisterPair GetRegisterPairAt(size_t argument_index) {
DCHECK_LT(argument_index + 1, GetNumberOfRegisters());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f8125c6..59a9565 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -16,14 +16,14 @@
#include "code_generator_x86_64.h"
+#include "art_method.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_x86_64.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "mirror/object_reference.h"
#include "thread.h"
#include "utils/assembler.h"
@@ -374,18 +374,19 @@
// temp = thread->string_init_entrypoint
__ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
} else {
// temp = method;
LoadCurrentMethod(temp);
if (!invoke->IsRecursive()) {
// temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ __ movl(temp, Address(temp, ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
// temp = temp[index_in_cache]
- __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+ __ movq(temp, Address(
+ temp, CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex())));
// (temp + offset_of_quick_compiled_code)()
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
} else {
__ call(&frame_entry_label_);
@@ -545,7 +546,7 @@
}
}
- __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
+ __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
CpuRegister(kMethodRegisterArgument));
}
@@ -586,7 +587,7 @@
void CodeGeneratorX86_64::LoadCurrentMethod(CpuRegister reg) {
DCHECK(RequiresCurrentMethod());
- __ movl(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
+ __ movq(reg, Address(CpuRegister(RSP), kCurrentMethodStackOffset));
}
Location CodeGeneratorX86_64::GetStackLocation(HLoadLocal* load) const {
@@ -1384,8 +1385,8 @@
}
CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
- size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
- invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
+ invoke->GetVTableIndex(), kX86_64PointerSize).SizeValue();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -1398,9 +1399,9 @@
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
- __ movl(temp, Address(temp, method_offset));
+ __ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
@@ -1416,8 +1417,8 @@
void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
// TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<CpuRegister>();
- uint32_t method_offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
- (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
+ uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
+ invoke->GetImtIndex() % mirror::Class::kImtSize, kX86_64PointerSize).Uint32Value();
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
@@ -1435,9 +1436,9 @@
}
codegen_->MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetImtEntryAt(method_offset);
- __ movl(temp, Address(temp, method_offset));
+ __ movq(temp, Address(temp, method_offset));
// call temp->GetEntryPoint();
- __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ __ call(Address(temp, ArtMethod::EntryPointFromQuickCompiledCodeOffset(
kX86_64WordSize).SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
@@ -4150,11 +4151,11 @@
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
__ movl(out, Address(
- current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ current_method, ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -4201,7 +4202,7 @@
LocationSummary* locations = load->GetLocations();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
- __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ testl(out, out);
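
On x86-64 the same rule shows up as instruction selection: the ArtMethod* spilled at the bottom of the frame (and the dex-cache method entry) is an 8-byte native pointer, so the spill, reload, and cache load switch from movl to movq, while loads of heap references remain 32-bit movl. A minimal model of the spill/reload under an illustrative frame layout:

```cpp
#include <cstdint>
#include <cstring>

// Illustrative frame: the method pointer lives at offset 0, like
// kCurrentMethodStackOffset in the code generator.
struct Frame {
  alignas(8) uint8_t bytes[64];
};

void SpillCurrentMethod(Frame* frame, void* art_method) {
  // Equivalent of "movq [rsp], rdi": all 8 bytes of the pointer must be stored.
  std::memcpy(frame->bytes, &art_method, sizeof(art_method));
}

void* LoadCurrentMethod(const Frame* frame) {
  // Equivalent of "movq reg, [rsp]"; a 32-bit movl would truncate the pointer.
  void* art_method = nullptr;
  std::memcpy(&art_method, frame->bytes, sizeof(art_method));
  return art_method;
}

int main() {
  Frame f{};
  int dummy = 0;
  SpillCurrentMethod(&f, &dummy);
  return LoadCurrentMethod(&f) == &dummy ? 0 : 1;
}
```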
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d7bd525..c74335b 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -50,7 +50,8 @@
: CallingConvention(kRuntimeParameterCoreRegisters,
kRuntimeParameterCoreRegistersLength,
kRuntimeParameterFpuRegisters,
- kRuntimeParameterFpuRegistersLength) {}
+ kRuntimeParameterFpuRegistersLength,
+ kX86_64PointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
@@ -62,7 +63,8 @@
kParameterCoreRegisters,
kParameterCoreRegistersLength,
kParameterFloatRegisters,
- kParameterFloatRegistersLength) {}
+ kParameterFloatRegistersLength,
+ kX86_64PointerSize) {}
private:
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 15f3deb..83f8d83 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -16,6 +16,7 @@
#include "inliner.h"
+#include "art_method-inl.h"
#include "builder.h"
#include "class_linker.h"
#include "constant_folding.h"
@@ -23,7 +24,6 @@
#include "driver/compiler_driver-inl.h"
#include "driver/dex_compilation_unit.h"
#include "instruction_simplifier.h"
-#include "mirror/art_method-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "nodes.h"
@@ -88,11 +88,10 @@
hs.NewHandle(caller_compilation_unit_.GetClassLinker()->FindDexCache(caller_dex_file)));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
soa.Decode<mirror::ClassLoader*>(caller_compilation_unit_.GetClassLoader())));
- Handle<mirror::ArtMethod> resolved_method(hs.NewHandle(
- compiler_driver_->ResolveMethod(
- soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type)));
+ ArtMethod* resolved_method(compiler_driver_->ResolveMethod(
+ soa, dex_cache, class_loader, &caller_compilation_unit_, method_index, invoke_type));
- if (resolved_method.Get() == nullptr) {
+ if (resolved_method == nullptr) {
VLOG(compiler) << "Method cannot be resolved " << PrettyMethod(method_index, caller_dex_file);
return false;
}
@@ -156,7 +155,7 @@
return true;
}
-bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
+bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
HInvoke* invoke_instruction,
uint32_t method_index,
bool same_dex_file) const {
@@ -199,6 +198,7 @@
caller_dex_file,
method_index,
requires_ctor_barrier,
+ compiler_driver_->GetInstructionSet(),
invoke_instruction->GetOriginalInvokeType(),
graph_->IsDebuggable(),
graph_->GetCurrentInstructionId());
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 09a36c6..b86c1ed 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -48,7 +48,7 @@
private:
bool TryInline(HInvoke* invoke_instruction, uint32_t method_index, InvokeType invoke_type) const;
- bool TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
+ bool TryBuildAndInline(ArtMethod* resolved_method,
HInvoke* invoke_instruction,
uint32_t method_index,
bool same_dex_file) const;
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index e785bf9..5436ec2 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -17,11 +17,11 @@
#include "intrinsics_arm.h"
#include "arch/arm/instruction_set_features_arm.h"
+#include "art_method.h"
#include "code_generator_arm.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/arm/assembler_arm.h"
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 53497b6..d1dc5b3 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -17,12 +17,12 @@
#include "intrinsics_arm64.h"
#include "arch/arm64/instruction_set_features_arm64.h"
+#include "art_method.h"
#include "code_generator_arm64.h"
#include "common_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index d2ca42d..5bbbc72 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -19,11 +19,11 @@
#include <limits>
#include "arch/x86/instruction_set_features_x86.h"
+#include "art_method.h"
#include "code_generator_x86.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/x86/assembler_x86.h"
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 2ccecfe..d6c90ff 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -19,11 +19,11 @@
#include <limits>
#include "arch/x86_64/instruction_set_features_x86_64.h"
+#include "art_method-inl.h"
#include "code_generator_x86_64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
-#include "mirror/art_method.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/x86_64/assembler_x86_64.h"
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 06f6a7f..cd91d2c 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -297,7 +297,8 @@
HCurrentMethod* HGraph::GetCurrentMethod() {
if (cached_current_method_ == nullptr) {
- cached_current_method_ = new (arena_) HCurrentMethod();
+ cached_current_method_ = new (arena_) HCurrentMethod(
+ Is64BitInstructionSet(instruction_set_) ? Primitive::kPrimLong : Primitive::kPrimInt);
if (entry_block_->GetFirstInstruction() == nullptr) {
entry_block_->AddInstruction(cached_current_method_);
} else {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6c2506e..6c8098b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -126,6 +126,7 @@
const DexFile& dex_file,
uint32_t method_idx,
bool should_generate_constructor_barrier,
+ InstructionSet instruction_set,
InvokeType invoke_type = kInvalidInvokeType,
bool debuggable = false,
int start_instruction_id = 0)
@@ -147,6 +148,7 @@
invoke_type_(invoke_type),
in_ssa_form_(false),
should_generate_constructor_barrier_(should_generate_constructor_barrier),
+ instruction_set_(instruction_set),
cached_null_constant_(nullptr),
cached_int_constants_(std::less<int32_t>(), arena->Adapter()),
cached_float_constants_(std::less<int32_t>(), arena->Adapter()),
@@ -399,6 +401,8 @@
const bool should_generate_constructor_barrier_;
+ const InstructionSet instruction_set_;
+
// Cached constants.
HNullConstant* cached_null_constant_;
ArenaSafeMap<int32_t, HIntConstant*> cached_int_constants_;
@@ -1873,7 +1877,7 @@
// instructions that work with the dex cache.
class HCurrentMethod : public HExpression<0> {
public:
- HCurrentMethod() : HExpression(Primitive::kPrimNot, SideEffects::None()) {}
+ explicit HCurrentMethod(Primitive::Type type) : HExpression(type, SideEffects::None()) {}
DECLARE_INSTRUCTION(CurrentMethod);
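
The HIR side keeps the same invariant: HCurrentMethod used to be typed as a reference (kPrimNot), but a native ArtMethod* is just a machine word, so the graph now picks an integer type whose width matches the target, as in the nodes.cc hunk above. Roughly:

```cpp
// Simplified stand-ins for ART's Primitive::Type and instruction-set query; the
// real choice is made in HGraph::GetCurrentMethod() above.
enum class Primitive { kPrimInt, kPrimLong };

Primitive CurrentMethodType(bool is_64bit_instruction_set) {
  // The current method is a native pointer, not a managed reference, so its HIR
  // type is a 32- or 64-bit integer depending on the target word size.
  return is_64bit_instruction_set ? Primitive::kPrimLong : Primitive::kPrimInt;
}

int main() {
  return CurrentMethodType(true) == Primitive::kPrimLong ? 0 : 1;
}
```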
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 7aea249..b0d1433 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -31,7 +31,7 @@
// Run the tests only on host.
#ifndef HAVE_ANDROID_OS
-class OptimizingCFITest : public CFITest {
+class OptimizingCFITest : public CFITest {
public:
// Enable this flag to generate the expected outputs.
static constexpr bool kGenerateExpected = false;
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index ecb3b0a..2c2c55f 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -32,7 +32,7 @@
// 0x00000012: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xE0, 0x0F, 0x1C, 0xB8, 0xF4, 0xD7, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
+ 0xE0, 0x0F, 0x1C, 0xF8, 0xF4, 0xD7, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF4, 0xD7, 0x42, 0xA9,
0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
};
@@ -41,7 +41,7 @@
0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
0x44, 0xD4, 0xD5, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
-// 0x00000000: str w0, [sp, #-64]!
+// 0x00000000: str x0, [sp, #-64]!
// 0x00000004: .cfi_def_cfa_offset: 64
// 0x00000004: stp x20, x21, [sp, #40]
// 0x00000008: .cfi_offset: r20 at cfa-24
@@ -99,13 +99,13 @@
static constexpr uint8_t expected_asm_kX86_64[] = {
0x55, 0x53, 0x48, 0x83, 0xEC, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24,
- 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x89, 0x3C, 0x24, 0xF2,
- 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24,
- 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
+ 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x48, 0x89, 0x3C, 0x24,
+ 0xF2, 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C,
+ 0x24, 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
};
static constexpr uint8_t expected_cfi_kX86_64[] = {
0x41, 0x0E, 0x10, 0x86, 0x04, 0x41, 0x0E, 0x18, 0x83, 0x06, 0x44, 0x0E,
- 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x43, 0x0A, 0x47, 0xDD, 0x47,
+ 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x44, 0x0A, 0x47, 0xDD, 0x47,
0xDE, 0x44, 0x0E, 0x18, 0x41, 0x0E, 0x10, 0xC3, 0x41, 0x0E, 0x08, 0xC6,
0x41, 0x0B, 0x0E, 0x40,
};
@@ -121,21 +121,20 @@
// 0x0000000d: .cfi_offset: r30 at cfa-32
// 0x0000000d: movsd [rsp + 24], xmm12
// 0x00000014: .cfi_offset: r29 at cfa-40
-// 0x00000014: mov [rsp], edi
-// 0x00000017: .cfi_remember_state
-// 0x00000017: movsd xmm12, [rsp + 24]
-// 0x0000001e: .cfi_restore: r29
-// 0x0000001e: movsd xmm13, [rsp + 32]
-// 0x00000025: .cfi_restore: r30
-// 0x00000025: addq rsp, 40
-// 0x00000029: .cfi_def_cfa_offset: 24
-// 0x00000029: pop rbx
-// 0x0000002a: .cfi_def_cfa_offset: 16
-// 0x0000002a: .cfi_restore: r3
-// 0x0000002a: pop rbp
-// 0x0000002b: .cfi_def_cfa_offset: 8
-// 0x0000002b: .cfi_restore: r6
-// 0x0000002b: ret
-// 0x0000002c: .cfi_restore_state
-// 0x0000002c: .cfi_def_cfa_offset: 64
-
+// 0x00000014: movq [rsp], rdi
+// 0x00000018: .cfi_remember_state
+// 0x00000018: movsd xmm12, [rsp + 24]
+// 0x0000001f: .cfi_restore: r29
+// 0x0000001f: movsd xmm13, [rsp + 32]
+// 0x00000026: .cfi_restore: r30
+// 0x00000026: addq rsp, 40
+// 0x0000002a: .cfi_def_cfa_offset: 24
+// 0x0000002a: pop rbx
+// 0x0000002b: .cfi_def_cfa_offset: 16
+// 0x0000002b: .cfi_restore: r3
+// 0x0000002b: pop rbp
+// 0x0000002c: .cfi_def_cfa_offset: 8
+// 0x0000002c: .cfi_restore: r6
+// 0x0000002c: ret
+// 0x0000002d: .cfi_restore_state
+// 0x0000002d: .cfi_def_cfa_offset: 64
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3123843..f6ef2f7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -19,6 +19,7 @@
#include <fstream>
#include <stdint.h>
+#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/timing_logger.h"
@@ -44,7 +45,6 @@
#include "intrinsics.h"
#include "licm.h"
#include "jni/quick/jni_compiler.h"
-#include "mirror/art_method-inl.h"
#include "nodes.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
@@ -196,7 +196,7 @@
return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
}
- uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
+ uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
@@ -515,8 +515,8 @@
dex_compilation_unit.GetClassDefIndex());
ArenaAllocator arena(Runtime::Current()->GetArenaPool());
HGraph* graph = new (&arena) HGraph(
- &arena, dex_file, method_idx, requires_barrier, kInvalidInvokeType,
- compiler_driver->GetCompilerOptions().GetDebuggable());
+ &arena, dex_file, method_idx, requires_barrier, compiler_driver->GetInstructionSet(),
+ kInvalidInvokeType, compiler_driver->GetCompilerOptions().GetDebuggable());
// For testing purposes, we put a special marker on method names that should be compiled
// with this compiler. This makes sure we're not regressing.
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 1fe9346..86c22ed 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -74,7 +74,8 @@
inline HGraph* CreateGraph(ArenaAllocator* allocator) {
return new (allocator) HGraph(
- allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1, false);
+ allocator, *reinterpret_cast<DexFile*>(allocator->Alloc(sizeof(DexFile))), -1, false,
+ kRuntimeISA);
}
// Create a control-flow graph from Dex instructions.
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f7d2ed1..4f1f457 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -16,7 +16,7 @@
#include "reference_type_propagation.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "scoped_thread_state_change.h"
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 9a859bf..5e784d7 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -71,7 +71,9 @@
physical_fp_register_intervals_.SetSize(codegen->GetNumberOfFloatingPointRegisters());
// Always reserve for the current method and the graph's max out registers.
// TODO: compute it instead.
- reserved_out_slots_ = 1 + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
+ // ArtMethod* takes 2 vregs for 64 bits.
+ reserved_out_slots_ = InstructionSetPointerSize(codegen->GetInstructionSet()) / kVRegSize +
+ codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
}
bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
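
Finally, the register allocator's reservation for outgoing calls follows the same arithmetic: the ArtMethod* passed to a callee occupies pointer_size / kVRegSize vreg slots, i.e. one slot on 32-bit targets and two on 64-bit targets. Sketched with an illustrative out-vreg count:

```cpp
#include <cstddef>

constexpr size_t kVRegSize = 4;

// Mirrors the reserved_out_slots_ computation above; max_out_vregs stands for
// whatever the graph reports as its largest outgoing-argument area.
constexpr size_t ReservedOutSlots(size_t pointer_size, size_t max_out_vregs) {
  return pointer_size / kVRegSize + max_out_vregs;
}

static_assert(ReservedOutSlots(4, 3) == 4, "32-bit: one slot for the ArtMethod*");
static_assert(ReservedOutSlots(8, 3) == 5, "64-bit: two slots for the ArtMethod*");

int main() { return 0; }
```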