author Calin Juravle <calin@google.com> 2015-09-29 04:52:17 +0100
committer Calin Juravle <calin@google.com> 2015-10-02 02:25:18 +0100
commit e460d1df1f789c7c8bb97024a8efbd713ac175e9 (patch)
tree 3511036fb18828dd0ee140d33a8bcd0535ebeab6
parent 25217af2a7cae96b32ba566aaf697288f3374c99 (diff)
Revert "Revert "Support unresolved fields in optimizing"
The CL also changes the calling convention for 64bit static field set
to use kArg2 instead of kArg1. This allows optimizing to keep the
assumptions:
- arm pairs are always of form (even_reg, odd_reg)
- ecx_edx is not used as a register on x86.

This reverts commit e6f49b47b6a4dc9c7684e4483757872cfc7ff1a1.

Change-Id: I93159917565824084abc96775f31be1a4249f2f3
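Why kArg2 keeps those assumptions: on ARM a wide core-register argument must land in an (even_reg, odd_reg) pair, and starting it at kArg1 would produce the invalid pair (R1, R2); on x86 it avoids materializing the ECX_EDX pair, which optimizing assumes is never used. A minimal sketch of the ARM pair invariant, with illustrative register names rather than ART's actual RegStorage encoding:

#include <cassert>

// Illustrative ARM core registers; the low half of a wide value must sit
// in an even register so (low, low + 1) forms a legal (even_reg, odd_reg)
// pair. Names and numbering are illustrative, not ART API.
enum ArmCoreReg { R0 = 0, R1 = 1, R2 = 2, R3 = 3 };

bool IsValidWidePair(ArmCoreReg low) { return low % 2 == 0; }

int main() {
  assert(!IsValidWidePair(R1));  // old convention: wide arg at kArg1 -> (R1, R2)
  assert(IsValidWidePair(R2));   // new convention: wide arg at kArg2 -> (R2, R3)
  return 0;
}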
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc                  |   2
-rw-r--r--  compiler/optimizing/builder.cc                    |  95
-rw-r--r--  compiler/optimizing/code_generator.cc             | 124
-rw-r--r--  compiler/optimizing/code_generator.h              |  31
-rw-r--r--  compiler/optimizing/code_generator_arm.cc         |  95
-rw-r--r--  compiler/optimizing/code_generator_arm.h          |  35
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc       | 132
-rw-r--r--  compiler/optimizing/code_generator_arm64.h        |  36
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc      | 128
-rw-r--r--  compiler/optimizing/code_generator_mips64.h       |  31
-rw-r--r--  compiler/optimizing/code_generator_x86.cc         | 101
-rw-r--r--  compiler/optimizing/code_generator_x86.h          |  33
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc      |  81
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h       |  32
-rw-r--r--  compiler/optimizing/graph_visualizer.cc           |  16
-rw-r--r--  compiler/optimizing/nodes.h                       | 110
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h   |   4
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc |  18
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S          |   8
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S      |   3
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S    |   2
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S          |  11
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S    |   2
-rw-r--r--  test/529-checker-unresolved/src/Main.java         | 114
-rw-r--r--  test/529-checker-unresolved/src/Unresolved.java   |  16
25 files changed, 1170 insertions(+), 90 deletions(-)
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1f114cf336..3c5c2fe010 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -148,7 +148,7 @@ void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, in
if (arg1.wide == 0) {
LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
} else {
- RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
+ RegStorage r_tmp = TargetReg(kArg2, kWide);
LoadValueDirectWideFixed(arg1, r_tmp);
}
LoadConstant(TargetReg(kArg0, kNotWide), arg0);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index cb36f62235..7ef79ec111 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1186,6 +1186,12 @@ void HGraphBuilder::PotentiallySimplifyFakeString(uint16_t original_dex_register
}
}
+static Primitive::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+ return Primitive::GetType(type[0]);
+}
+
bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put) {
@@ -1205,44 +1211,61 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
ArtField* resolved_field =
compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
- if (resolved_field == nullptr) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
- return false;
- }
-
- Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot, dex_pc);
- current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
+ HInstruction* null_check = new (arena_) HNullCheck(object, dex_pc);
+ current_block_->AddInstruction(null_check);
+
+ Primitive::Type field_type = (resolved_field == nullptr)
+ ? GetFieldAccessType(*dex_file_, field_index)
+ : resolved_field->GetTypeAsPrimitiveType();
if (is_put) {
Temporaries temps(graph_);
- HInstruction* null_check = current_block_->GetLastInstruction();
// We need one temporary for the null check.
temps.Add(null_check);
HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
- current_block_->AddInstruction(new (arena_) HInstanceFieldSet(
- null_check,
- value,
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- *dex_file_,
- dex_compilation_unit_->GetDexCache(),
- dex_pc));
+ HInstruction* field_set = nullptr;
+ if (resolved_field == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+ field_set = new (arena_) HUnresolvedInstanceFieldSet(null_check,
+ value,
+ field_type,
+ field_index,
+ dex_pc);
+ } else {
+ field_set = new (arena_) HInstanceFieldSet(null_check,
+ value,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ *dex_file_,
+ dex_compilation_unit_->GetDexCache(),
+ dex_pc);
+ }
+ current_block_->AddInstruction(field_set);
} else {
- current_block_->AddInstruction(new (arena_) HInstanceFieldGet(
- current_block_->GetLastInstruction(),
- field_type,
- resolved_field->GetOffset(),
- resolved_field->IsVolatile(),
- field_index,
- *dex_file_,
- dex_compilation_unit_->GetDexCache(),
- dex_pc));
-
- UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+ HInstruction* field_get = nullptr;
+ if (resolved_field == nullptr) {
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+ field_get = new (arena_) HUnresolvedInstanceFieldGet(null_check,
+ field_type,
+ field_index,
+ dex_pc);
+ } else {
+ field_get = new (arena_) HInstanceFieldGet(null_check,
+ field_type,
+ resolved_field->GetOffset(),
+ resolved_field->IsVolatile(),
+ field_index,
+ *dex_file_,
+ dex_compilation_unit_->GetDexCache(),
+ dex_pc);
+ }
+ current_block_->AddInstruction(field_get);
+ UpdateLocal(source_or_dest_reg, field_get, dex_pc);
}
+
return true;
}
@@ -1299,8 +1322,18 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
if (resolved_field == nullptr) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
- return false;
+ MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+ Primitive::Type field_type = GetFieldAccessType(*dex_file_, field_index);
+ if (is_put) {
+ HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
+ current_block_->AddInstruction(
+ new (arena_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
+ } else {
+ current_block_->AddInstruction(
+ new (arena_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
+ UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+ }
+ return true;
}
const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index be05691741..8254277f96 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -413,6 +413,130 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
+void CodeGenerator::CreateUnresolvedFieldLocationSummary(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ const FieldAccessCallingConvention& calling_convention) {
+ bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedInstanceFieldSet();
+ bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedStaticFieldGet();
+
+ ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
+ LocationSummary* locations =
+ new (allocator) LocationSummary(field_access, LocationSummary::kCall);
+
+ locations->AddTemp(calling_convention.GetFieldIndexLocation());
+
+ if (is_instance) {
+ // Add the `this` object for instance field accesses.
+ locations->SetInAt(0, calling_convention.GetObjectLocation());
+ }
+
+ // Note that pSetXXStatic/pGetXXStatic always take/return an int or int64
+ // regardless of the type. Because of that we are forced to special case
+ // the access to floating point values.
+ if (is_get) {
+ if (Primitive::IsFloatingPointType(field_type)) {
+ // The return value will be stored in regular registers while the register
+ // allocator expects it in a floating point register.
+ // Note: We don't need to request additional temps because the return
+ // register(s) are already blocked due to the call and they may overlap with
+ // the input or field index.
+ // The transfer between the two will be done at codegen level.
+ locations->SetOut(calling_convention.GetFpuLocation(field_type));
+ } else {
+ locations->SetOut(calling_convention.GetReturnLocation(field_type));
+ }
+ } else {
+ size_t set_index = is_instance ? 1 : 0;
+ if (Primitive::IsFloatingPointType(field_type)) {
+ // The set value comes from a float location while the calling convention
+ // expects it in a regular register location. Allocate a temp for it and
+ // make the transfer at codegen.
+ AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
+ locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
+ } else {
+ locations->SetInAt(set_index,
+ calling_convention.GetSetValueLocation(field_type, is_instance));
+ }
+ }
+}
+
+void CodeGenerator::GenerateUnresolvedFieldAccess(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc,
+ const FieldAccessCallingConvention& calling_convention) {
+ LocationSummary* locations = field_access->GetLocations();
+
+ MoveConstant(locations->GetTemp(0), field_index);
+
+ bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedInstanceFieldSet();
+ bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+ || field_access->IsUnresolvedStaticFieldGet();
+
+ if (!is_get && Primitive::IsFloatingPointType(field_type)) {
+ // Copy the float value to be set into the calling convention register.
+ // Note that using the temp location directly is problematic as we don't
+ // support temp register pairs. To avoid boilerplate conversion code, use
+ // the location from the calling convention.
+ MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
+ locations->InAt(is_instance ? 1 : 0),
+ (Primitive::Is64BitType(field_type) ? Primitive::kPrimLong : Primitive::kPrimInt));
+ }
+
+ QuickEntrypointEnum entrypoint = kQuickSet8Static; // Initialize to anything to avoid warnings.
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
+ : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
+ break;
+ case Primitive::kPrimByte:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
+ : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
+ break;
+ case Primitive::kPrimShort:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
+ : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
+ break;
+ case Primitive::kPrimChar:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
+ : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
+ break;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
+ : (is_get ? kQuickGet32Static : kQuickSet32Static);
+ break;
+ case Primitive::kPrimNot:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
+ : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
+ break;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ entrypoint = is_instance
+ ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
+ : (is_get ? kQuickGet64Static : kQuickSet64Static);
+ break;
+ default:
+ LOG(FATAL) << "Invalid type " << field_type;
+ }
+ InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);
+
+ if (is_get && Primitive::IsFloatingPointType(field_type)) {
+ MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
+ }
+}
+
void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
// The DCHECKS below check that a register is not specified twice in
// the summary. The out location can overlap with an input, so we need
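A note on the entrypoint switch above: int/float collapse onto the 32-bit accessors and long/double onto the 64-bit ones because the pGet/pSet entrypoints traffic only in integer registers, while the gets keep boolean/byte and char/short distinct for sign extension. A condensed sketch of that dispatch, with simplified stand-in enums (not the real Primitive::Type or QuickEntrypointEnum) and the instance/static split omitted for brevity:

#include <stdexcept>

// Simplified stand-ins; names are illustrative only.
enum class Kind { kBool, kByte, kChar, kShort, kInt, kFloat, kRef, kLong, kDouble };
enum class Ep { kGetBool, kGetByte, kGetChar, kGetShort, kGet32, kGetObj, kGet64,
                kSet8, kSet16, kSet32, kSetObj, kSet64 };

// Gets distinguish signedness (boolean vs byte, char vs short); sets only
// need the store width, so both narrow kinds share kSet8/kSet16.
Ep Select(Kind kind, bool is_get) {
  switch (kind) {
    case Kind::kBool:   return is_get ? Ep::kGetBool  : Ep::kSet8;
    case Kind::kByte:   return is_get ? Ep::kGetByte  : Ep::kSet8;
    case Kind::kChar:   return is_get ? Ep::kGetChar  : Ep::kSet16;
    case Kind::kShort:  return is_get ? Ep::kGetShort : Ep::kSet16;
    case Kind::kInt:
    case Kind::kFloat:  return is_get ? Ep::kGet32 : Ep::kSet32;   // raw bits
    case Kind::kRef:    return is_get ? Ep::kGetObj : Ep::kSetObj;
    case Kind::kLong:
    case Kind::kDouble: return is_get ? Ep::kGet64 : Ep::kSet64;   // raw bits
  }
  throw std::invalid_argument("unexpected kind");
}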
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5da0e59187..a3ebc43f11 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -143,6 +143,22 @@ class InvokeDexCallingConventionVisitor {
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};
+class FieldAccessCallingConvention {
+ public:
+ virtual Location GetObjectLocation() const = 0;
+ virtual Location GetFieldIndexLocation() const = 0;
+ virtual Location GetReturnLocation(Primitive::Type type) const = 0;
+ virtual Location GetSetValueLocation(Primitive::Type type, bool is_instance) const = 0;
+ virtual Location GetFpuLocation(Primitive::Type type) const = 0;
+ virtual ~FieldAccessCallingConvention() {}
+
+ protected:
+ FieldAccessCallingConvention() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConvention);
+};
+
class CodeGenerator {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
@@ -177,6 +193,9 @@ class CodeGenerator {
virtual void Bind(HBasicBlock* block) = 0;
virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
virtual void MoveConstant(Location destination, int32_t value) = 0;
+ virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0;
+ virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;
+
virtual Assembler* GetAssembler() = 0;
virtual const Assembler& GetAssembler() const = 0;
virtual size_t GetWordSize() const = 0;
@@ -385,6 +404,18 @@ class CodeGenerator {
void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
+ void CreateUnresolvedFieldLocationSummary(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ const FieldAccessCallingConvention& calling_convention);
+
+ void GenerateUnresolvedFieldAccess(
+ HInstruction* field_access,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc,
+ const FieldAccessCallingConvention& calling_convention);
+
void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a7dbb53382..cf7f5f4e08 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -906,6 +906,10 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
Primitive::kPrimInt);
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
+ } else if (source.IsFpuRegisterPair()) {
+ __ vmovrrd(destination.AsRegisterPairLow<Register>(),
+ destination.AsRegisterPairHigh<Register>(),
+ FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
} else {
DCHECK(source.IsDoubleStackSlot());
DCHECK(ExpectedPairLayout(destination));
@@ -917,6 +921,10 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
__ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
SP,
source.GetStackIndex());
+ } else if (source.IsRegisterPair()) {
+ __ vmovdrr(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
+ source.AsRegisterPairLow<Register>(),
+ source.AsRegisterPairHigh<Register>());
} else {
UNIMPLEMENTED(FATAL);
}
@@ -1038,6 +1046,25 @@ void CodeGeneratorARM::MoveConstant(Location location, int32_t value) {
__ LoadImmediate(location.AsRegister<Register>(), value);
}
+void CodeGeneratorARM::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ if (Primitive::Is64BitType(dst_type)) {
+ Move64(dst, src);
+ } else {
+ Move32(dst, src);
+ }
+}
+
+void CodeGeneratorARM::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else if (location.IsRegisterPair()) {
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
@@ -3605,6 +3632,74 @@ void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
? LocationSummary::kCallOnSlowPath
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 111112e9b2..16d1d383b4 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -96,6 +96,38 @@ class InvokeDexCallingConventionVisitorARM : public InvokeDexCallingConventionVi
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM);
};
+class FieldAccessCallingConventionARM : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionARM() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(R1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(R0);
+ }
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(R0, R1)
+ : Location::RegisterLocation(R0);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(R2, R3)
+ : (is_instance
+ ? Location::RegisterLocation(R2)
+ : Location::RegisterLocation(R1));
+ }
+ Location GetFpuLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::FpuRegisterPairLocation(S0, S1)
+ : Location::FpuRegisterLocation(S0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM);
+};
+
class ParallelMoveResolverARM : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
@@ -225,6 +257,9 @@ class CodeGeneratorARM : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 78ecfdec10..af5bbaae3d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -19,7 +19,6 @@
#include "arch/arm64/instruction_set_features_arm64.h"
#include "art_method.h"
#include "code_generator_utils.h"
-#include "common_arm64.h"
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -666,7 +665,7 @@ void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
void ParallelMoveResolverARM64::EmitMove(size_t index) {
DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
- codegen_->MoveLocation(move->GetDestination(), move->GetSource());
+ codegen_->MoveLocation(move->GetDestination(), move->GetSource(), Primitive::kPrimVoid);
}
void CodeGeneratorARM64::GenerateFrameEntry() {
@@ -750,7 +749,9 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
}
if (instruction->IsCurrentMethod()) {
- MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
+ MoveLocation(location,
+ Location::DoubleStackSlot(kCurrentMethodStackOffset),
+ Primitive::kPrimVoid);
} else if (locations != nullptr && locations->Out().Equals(location)) {
return;
} else if (instruction->IsIntConstant()
@@ -793,6 +794,14 @@ void CodeGeneratorARM64::MoveConstant(Location location, int32_t value) {
__ Mov(RegisterFrom(location, Primitive::kPrimInt), value);
}
+void CodeGeneratorARM64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
Primitive::Type type = load->GetType();
@@ -943,7 +952,9 @@ static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
(cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
}
-void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
+void CodeGeneratorARM64::MoveLocation(Location destination,
+ Location source,
+ Primitive::Type dst_type) {
if (source.Equals(destination)) {
return;
}
@@ -952,7 +963,7 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
// locations. When moving from and to a register, the argument type can be
// used to generate 32bit instead of 64bit moves. In debug mode we also
// check the coherency of the locations and the type.
- bool unspecified_type = (type == Primitive::kPrimVoid);
+ bool unspecified_type = (dst_type == Primitive::kPrimVoid);
if (destination.IsRegister() || destination.IsFpuRegister()) {
if (unspecified_type) {
@@ -962,30 +973,44 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
|| src_cst->IsFloatConstant()
|| src_cst->IsNullConstant()))) {
// For stack slots and 32bit constants, a 64bit type is appropriate.
- type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+ dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
} else {
// If the source is a double stack slot or a 64bit constant, a 64bit
// type is appropriate. Else the source is a register, and since the
// type has not been specified, we choose a 64bit type to force a 64bit
// move.
- type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+ dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
- (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
- CPURegister dst = CPURegisterFrom(destination, type);
+ DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+ (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
+ CPURegister dst = CPURegisterFrom(destination, dst_type);
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
__ Ldr(dst, StackOperandFrom(source));
} else if (source.IsConstant()) {
- DCHECK(CoherentConstantAndType(source, type));
+ DCHECK(CoherentConstantAndType(source, dst_type));
MoveConstant(dst, source.GetConstant());
+ } else if (source.IsRegister()) {
+ if (destination.IsRegister()) {
+ __ Mov(Register(dst), RegisterFrom(source, dst_type));
+ } else {
+ DCHECK(destination.IsFpuRegister());
+ Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+ ? Primitive::kPrimLong
+ : Primitive::kPrimInt;
+ __ Fmov(FPRegisterFrom(destination, dst_type), RegisterFrom(source, source_type));
+ }
} else {
+ DCHECK(source.IsFpuRegister());
if (destination.IsRegister()) {
- __ Mov(Register(dst), RegisterFrom(source, type));
+ Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+ ? Primitive::kPrimDouble
+ : Primitive::kPrimFloat;
+ __ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type));
} else {
DCHECK(destination.IsFpuRegister());
- __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
+ __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
}
}
} else { // The destination is not a register. It must be a stack slot.
@@ -993,16 +1018,17 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
if (source.IsRegister() || source.IsFpuRegister()) {
if (unspecified_type) {
if (source.IsRegister()) {
- type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
} else {
- type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
- (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
- __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
+ DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+ (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
+ __ Str(CPURegisterFrom(source, dst_type), StackOperandFrom(destination));
} else if (source.IsConstant()) {
- DCHECK(unspecified_type || CoherentConstantAndType(source, type)) << source << " " << type;
+ DCHECK(unspecified_type || CoherentConstantAndType(source, dst_type))
+ << source << " " << dst_type;
UseScratchRegisterScope temps(GetVIXLAssembler());
HConstant* src_cst = source.GetConstant();
CPURegister temp;
@@ -3508,6 +3534,74 @@ void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruc
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionARM64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7178081bf8..a068b48797 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
#include "code_generator.h"
+#include "common_arm64.h"
#include "dex/compiler_enums.h"
#include "driver/compiler_options.h"
#include "nodes.h"
@@ -141,6 +142,34 @@ class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConvention
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64);
};
+class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionARM64() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return helpers::LocationFrom(vixl::x1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return helpers::LocationFrom(vixl::x0);
+ }
+ Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return helpers::LocationFrom(vixl::x0);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? helpers::LocationFrom(vixl::x2)
+ : (is_instance
+ ? helpers::LocationFrom(vixl::x2)
+ : helpers::LocationFrom(vixl::x1));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return helpers::LocationFrom(vixl::d0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
+};
+
class InstructionCodeGeneratorARM64 : public HGraphVisitor {
public:
InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
@@ -334,10 +363,9 @@ class CodeGeneratorARM64 : public CodeGenerator {
// Code generation helpers.
void MoveConstant(vixl::CPURegister destination, HConstant* constant);
void MoveConstant(Location destination, int32_t value) OVERRIDE;
- // The type is optional. When specified it must be coherent with the
- // locations, and is used for optimisation and debugging.
- void MoveLocation(Location destination, Location source,
- Primitive::Type type = Primitive::kPrimVoid);
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
void LoadAcquire(HInstruction* instruction, vixl::CPURegister dst, const vixl::MemOperand& src);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index ad0a39c753..e95d283c1a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -617,7 +617,7 @@ void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
void CodeGeneratorMIPS64::MoveLocation(Location destination,
Location source,
- Primitive::Type type) {
+ Primitive::Type dst_type) {
if (source.Equals(destination)) {
return;
}
@@ -625,7 +625,7 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
// A valid move can always be inferred from the destination and source
// locations. When moving from and to a register, the argument type can be
// used to generate 32bit instead of 64bit moves.
- bool unspecified_type = (type == Primitive::kPrimVoid);
+ bool unspecified_type = (dst_type == Primitive::kPrimVoid);
DCHECK_EQ(unspecified_type, false);
if (destination.IsRegister() || destination.IsFpuRegister()) {
@@ -636,21 +636,21 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
|| src_cst->IsFloatConstant()
|| src_cst->IsNullConstant()))) {
// For stack slots and 32bit constants, a 64bit type is appropriate.
- type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+ dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
} else {
// If the source is a double stack slot or a 64bit constant, a 64bit
// type is appropriate. Else the source is a register, and since the
// type has not been specified, we choose a 64bit type to force a 64bit
// move.
- type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+ dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
- (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
+ DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+ (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
// Move to GPR/FPR from stack
LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
- if (Primitive::IsFloatingPointType(type)) {
+ if (Primitive::IsFloatingPointType(dst_type)) {
__ LoadFpuFromOffset(load_type,
destination.AsFpuRegister<FpuRegister>(),
SP,
@@ -665,31 +665,47 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
} else if (source.IsConstant()) {
// Move to GPR/FPR from constant
GpuRegister gpr = AT;
- if (!Primitive::IsFloatingPointType(type)) {
+ if (!Primitive::IsFloatingPointType(dst_type)) {
gpr = destination.AsRegister<GpuRegister>();
}
- if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
+ if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
__ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
} else {
__ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
}
- if (type == Primitive::kPrimFloat) {
+ if (dst_type == Primitive::kPrimFloat) {
__ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
- } else if (type == Primitive::kPrimDouble) {
+ } else if (dst_type == Primitive::kPrimDouble) {
__ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
}
- } else {
+ } else if (source.IsRegister()) {
if (destination.IsRegister()) {
// Move to GPR from GPR
__ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
} else {
+ DCHECK(destination.IsFpuRegister());
+ if (Primitive::Is64BitType(dst_type)) {
+ __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+ } else {
+ __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+ }
+ }
+ } else if (source.IsFpuRegister()) {
+ if (destination.IsFpuRegister()) {
// Move to FPR from FPR
- if (type == Primitive::kPrimFloat) {
+ if (dst_type == Primitive::kPrimFloat) {
__ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
} else {
- DCHECK_EQ(type, Primitive::kPrimDouble);
+ DCHECK_EQ(dst_type, Primitive::kPrimDouble);
__ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
}
+ } else {
+ DCHECK(destination.IsRegister());
+ if (Primitive::Is64BitType(dst_type)) {
+ __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+ } else {
+ __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+ }
}
}
} else { // The destination is not a register. It must be a stack slot.
@@ -697,13 +713,13 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
if (source.IsRegister() || source.IsFpuRegister()) {
if (unspecified_type) {
if (source.IsRegister()) {
- type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
} else {
- type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+ dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
}
}
- DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
- (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
+ DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+ (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
// Move to stack from GPR/FPR
StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
if (source.IsRegister()) {
@@ -861,6 +877,14 @@ void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
__ LoadConst32(location.AsRegister<GpuRegister>(), value);
}
+void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
Primitive::Type type = load->GetType();
@@ -3118,6 +3142,74 @@ void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instru
HandleFieldSet(instruction, instruction->GetFieldInfo());
}
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionMIPS64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 16461d6c04..5e8f9e7f30 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -106,6 +106,31 @@ class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, Fpu
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};
+class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionMIPS64() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(A1);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(A0);
+ }
+ Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::RegisterLocation(A0);
+ }
+ Location GetSetValueLocation(
+ Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
+ return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1);
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(F0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS64);
+};
+
class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
@@ -280,11 +305,13 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void Finalize(CodeAllocator* allocator) OVERRIDE;
// Code generation helpers.
-
- void MoveLocation(Location destination, Location source, Primitive::Type type);
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
+
void SwapLocations(Location loc1, Location loc2, Primitive::Type type);
// Generate code to invoke a runtime entry point.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3d97132d9b..5078456eb1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -827,7 +827,10 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
Primitive::kPrimInt);
} else if (source.IsFpuRegister()) {
- LOG(FATAL) << "Unimplemented";
+ XmmRegister src_reg = source.AsFpuRegister<XmmRegister>();
+ __ movd(destination.AsRegisterPairLow<Register>(), src_reg);
+ __ psrlq(src_reg, Immediate(32));
+ __ movd(destination.AsRegisterPairHigh<Register>(), src_reg);
} else {
// No conflict possible, so just do the moves.
DCHECK(source.IsDoubleStackSlot());
@@ -840,6 +843,15 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
__ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>());
} else if (source.IsDoubleStackSlot()) {
__ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex()));
+ } else if (source.IsRegisterPair()) {
+ size_t elem_size = Primitive::ComponentSize(Primitive::kPrimInt);
+ // Create stack space for 2 elements.
+ __ subl(ESP, Immediate(2 * elem_size));
+ __ movl(Address(ESP, 0), source.AsRegisterPairLow<Register>());
+ __ movl(Address(ESP, elem_size), source.AsRegisterPairHigh<Register>());
+ __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
+ // And remove the temporary stack space we allocated.
+ __ addl(ESP, Immediate(2 * elem_size));
} else {
LOG(FATAL) << "Unimplemented";
}
@@ -966,6 +978,25 @@ void CodeGeneratorX86::MoveConstant(Location location, int32_t value) {
__ movl(location.AsRegister<Register>(), Immediate(value));
}
+void CodeGeneratorX86::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+ if (Primitive::Is64BitType(dst_type)) {
+ Move64(dst, src);
+ } else {
+ Move32(dst, src);
+ }
+}
+
+void CodeGeneratorX86::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else if (location.IsRegisterPair()) {
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* successor) {
DCHECK(!successor->IsExitBlock());
@@ -4085,6 +4116,74 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instr
HandleFieldGet(instruction, instruction->GetFieldInfo());
}
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
? LocationSummary::kCallOnSlowPath
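The Move64 additions above transfer a 64-bit value between an XMM register and a core register pair: movd extracts the low word, psrlq shifts the high word down, and the opposite direction round-trips through two stack slots. A hedged standalone sketch of the XMM-to-pair direction using SSE2 intrinsics (illustrative, not ART code):

#include <cstdint>
#include <emmintrin.h>  // SSE2 intrinsics

// Split the 64-bit payload of an XMM register into two 32-bit halves,
// mirroring the movd / psrlq / movd sequence in CodeGeneratorX86::Move64.
void SplitDouble(double value, uint32_t* lo, uint32_t* hi) {
  __m128i bits = _mm_castpd_si128(_mm_set_sd(value));
  *lo = static_cast<uint32_t>(_mm_cvtsi128_si32(bits));  // movd: low 32 bits
  bits = _mm_srli_epi64(bits, 32);                       // psrlq $32
  *hi = static_cast<uint32_t>(_mm_cvtsi128_si32(bits));  // movd: high 32 bits
}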
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 2c2fc65444..ae2d84f945 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -91,6 +91,36 @@ class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVi
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86);
};
+class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionX86() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(ECX);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(EAX);
+ }
+ Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(EAX, EDX)
+ : Location::RegisterLocation(EAX);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterPairLocation(EDX, EBX)
+ : (is_instance
+ ? Location::RegisterLocation(EDX)
+ : Location::RegisterLocation(ECX));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(XMM0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86);
+};
+
class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
public:
ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
@@ -228,6 +258,9 @@ class CodeGeneratorX86 : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 6ea6138668..791bb9e6aa 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -990,6 +990,19 @@ void CodeGeneratorX86_64::MoveConstant(Location location, int32_t value) {
Load64BitValue(location.AsRegister<CpuRegister>(), static_cast<int64_t>(value));
}
+void CodeGeneratorX86_64::MoveLocation(
+ Location dst, Location src, Primitive::Type dst_type ATTRIBUTE_UNUSED) {
+ Move(dst, src);
+}
+
+void CodeGeneratorX86_64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+ if (location.IsRegister()) {
+ locations->AddTemp(location);
+ } else {
+ UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+ }
+}
+
void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
DCHECK(!successor->IsExitBlock());
@@ -3849,6 +3862,74 @@ void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instru
HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
}
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldGet(
+ HUnresolvedInstanceFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldSet(
+ HUnresolvedInstanceFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldGet(
+ HUnresolvedStaticFieldGet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->CreateUnresolvedFieldLocationSummary(
+ instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldSet(
+ HUnresolvedStaticFieldSet* instruction) {
+ FieldAccessCallingConventionX86_64 calling_convention;
+ codegen_->GenerateUnresolvedFieldAccess(instruction,
+ instruction->GetFieldType(),
+ instruction->GetFieldIndex(),
+ instruction->GetDexPc(),
+ calling_convention);
+}
+
void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
? LocationSummary::kCallOnSlowPath
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 197ce63847..ecc8630e6b 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -70,6 +70,35 @@ class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegis
DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
};
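+// Fixed registers used when an unresolved field access is compiled as a call to
+// the quick field entrypoints: the field index travels in RDI and the object (for
+// instance accesses) in RSI; results come back in RAX or XMM0.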
+class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention {
+ public:
+ FieldAccessCallingConventionX86_64() {}
+
+ Location GetObjectLocation() const OVERRIDE {
+ return Location::RegisterLocation(RSI);
+ }
+ Location GetFieldIndexLocation() const OVERRIDE {
+ return Location::RegisterLocation(RDI);
+ }
+ Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::RegisterLocation(RAX);
+ }
+ Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
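+    // Wide values always travel in RDX. Narrow instance sets also use RDX because
+    // RSI holds the object; narrow static sets pass the value in RSI instead.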
+ return Primitive::Is64BitType(type)
+ ? Location::RegisterLocation(RDX)
+ : (is_instance
+ ? Location::RegisterLocation(RDX)
+ : Location::RegisterLocation(RSI));
+ }
+ Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+ return Location::FpuRegisterLocation(XMM0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86_64);
+};
+
class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventionVisitor {
public:
InvokeDexCallingConventionVisitorX86_64() {}
@@ -215,6 +244,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
+ void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+ void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 2c6c3b726a..7a83662696 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -398,6 +398,22 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("intrinsic") << invoke->GetIntrinsic();
}
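+  // Expose the declared field type of unresolved accesses so checker tests
+  // (529-checker-unresolved) can match on it.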
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
+ void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
+ void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE {
+ StartAttributeStream("field_type") << field_access->GetFieldType();
+ }
+
void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE {
StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d52f5927de..849f876f36 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1067,6 +1067,10 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(Shr, BinaryOperation) \
M(StaticFieldGet, Instruction) \
M(StaticFieldSet, Instruction) \
+ M(UnresolvedInstanceFieldGet, Instruction) \
+ M(UnresolvedInstanceFieldSet, Instruction) \
+ M(UnresolvedStaticFieldGet, Instruction) \
+ M(UnresolvedStaticFieldSet, Instruction) \
M(StoreLocal, Instruction) \
M(Sub, BinaryOperation) \
M(SuspendCheck, Instruction) \
@@ -4735,6 +4739,112 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
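+// Accesses to fields that could not be resolved at compile time. They are
+// implemented as calls into the runtime, hence the environment, the conservative
+// side effects and the ability to throw.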
+class HUnresolvedInstanceFieldGet : public HExpression<1> {
+ public:
+ HUnresolvedInstanceFieldGet(HInstruction* obj,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc),
+ field_index_(field_index) {
+ SetRawInputAt(0, obj);
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return GetType(); }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet);
+
+ private:
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet);
+};
+
+class HUnresolvedInstanceFieldSet : public HTemplateInstruction<2> {
+ public:
+ HUnresolvedInstanceFieldSet(HInstruction* obj,
+ HInstruction* value,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
+ field_type_(field_type),
+ field_index_(field_index) {
+ DCHECK_EQ(field_type, value->GetType());
+ SetRawInputAt(0, obj);
+ SetRawInputAt(1, value);
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return field_type_; }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet);
+
+ private:
+ const Primitive::Type field_type_;
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet);
+};
+
+class HUnresolvedStaticFieldGet : public HExpression<0> {
+ public:
+ HUnresolvedStaticFieldGet(Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc),
+ field_index_(field_index) {
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return GetType(); }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedStaticFieldGet);
+
+ private:
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet);
+};
+
+class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> {
+ public:
+ HUnresolvedStaticFieldSet(HInstruction* value,
+ Primitive::Type field_type,
+ uint32_t field_index,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
+ field_type_(field_type),
+ field_index_(field_index) {
+ DCHECK_EQ(field_type, value->GetType());
+ SetRawInputAt(0, value);
+ }
+
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ Primitive::Type GetFieldType() const { return field_type_; }
+ uint32_t GetFieldIndex() const { return field_index_; }
+
+ DECLARE_INSTRUCTION(UnresolvedStaticFieldSet);
+
+ private:
+ const Primitive::Type field_type_;
+ const uint32_t field_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldSet);
+};
+
// Implement the move-exception DEX instruction.
class HLoadException : public HExpression<0> {
public:
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index c7701b70ad..f1d29700d9 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -34,6 +34,7 @@ enum MethodCompilationStat {
kInstructionSimplifications,
kInstructionSimplificationsArch,
kUnresolvedMethod,
+ kUnresolvedField,
kNotCompiledBranchOutsideMethodCode,
kNotCompiledCannotBuildSSA,
kNotCompiledCantAccesType,
@@ -45,7 +46,6 @@ enum MethodCompilationStat {
kNotCompiledPathological,
kNotCompiledSpaceFilter,
kNotCompiledUnhandledInstruction,
- kNotCompiledUnresolvedField,
kNotCompiledUnsupportedIsa,
kNotCompiledVerifyAtRuntime,
kNotOptimizedDisabled,
@@ -104,6 +104,7 @@ class OptimizingCompilerStats {
case kInstructionSimplifications: return "kInstructionSimplifications";
case kInstructionSimplificationsArch: return "kInstructionSimplificationsArch";
case kUnresolvedMethod : return "kUnresolvedMethod";
+ case kUnresolvedField : return "kUnresolvedField";
case kNotCompiledBranchOutsideMethodCode: return "kNotCompiledBranchOutsideMethodCode";
case kNotCompiledCannotBuildSSA : return "kNotCompiledCannotBuildSSA";
case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
@@ -115,7 +116,6 @@ class OptimizingCompilerStats {
case kNotCompiledPathological : return "kNotCompiledPathological";
case kNotCompiledSpaceFilter : return "kNotCompiledSpaceFilter";
case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
- case kNotCompiledUnresolvedField : return "kNotCompiledUnresolvedField";
case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
case kNotCompiledVerifyAtRuntime : return "kNotCompiledVerifyAtRuntime";
case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index fe837e4545..d22f2540ad 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -52,6 +52,8 @@ class RTPVisitor : public HGraphDelegateVisitor {
void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
+ void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
+ void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE;
void VisitInvoke(HInvoke* instr) OVERRIDE;
void VisitArrayGet(HArrayGet* instr) OVERRIDE;
void VisitCheckCast(HCheckCast* instr) OVERRIDE;
@@ -450,6 +452,22 @@ void RTPVisitor::VisitStaticFieldGet(HStaticFieldGet* instr) {
UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
}
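+// Without a resolved field to inspect, the most precise static type for a
+// reference-typed get is an inexact java.lang.Object.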
+void RTPVisitor::VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) {
+ // TODO: Use descriptor to get the actual type.
+ if (instr->GetFieldType() == Primitive::kPrimNot) {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+ }
+}
+
+void RTPVisitor::VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) {
+ // TODO: Use descriptor to get the actual type.
+ if (instr->GetFieldType() == Primitive::kPrimNot) {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+ }
+}
+
void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
ScopedObjectAccess soa(Thread::Current());
mirror::DexCache* dex_cache =
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index dc1cf8ab51..d09631bc71 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -839,13 +839,12 @@ TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_R
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
/*
* Called by managed code to resolve a static field and store a 64-bit primitive value.
- * On entry r0 holds field index, r1:r2 hold new_val
+ * On entry r0 holds field index, r2:r3 hold new_val
*/
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
- mov r3, r2 @ pass one half of wide argument
- mov r2, r1 @ pass other half of wide argument
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r12 @ save callee saves in case of GC
+ @ r2:r3 contain the wide argument
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
@@ -870,6 +869,7 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12, lr @ save callee saves in case of GC
+ @ r2:r3 contain the wide argument
ldr r12, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-12]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 12
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 68121781ca..be5a15ec39 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1421,9 +1421,8 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case of GC
- mov x3, x1 // Store value
ldr x1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] // Load referrer
- mov x2, x3 // Put value param
+    // new_val is already in x2
mov x3, xSELF // pass Thread::Current
bl artSet64StaticFromCode
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index ce1b2f3d24..68156ae7e3 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1244,7 +1244,7 @@ END art_quick_set32_static
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
- move $a2, $a1 # pass new_val
+    # a2 already contains new_val
ld $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
move $a3, rSELF # pass Thread::Current
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index f3b15c9ab2..3afc4d545f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1434,15 +1434,18 @@ END_FUNCTION art_quick_set64_instance
// Call artSet64StaticFromCode with 3 word-sized arguments and the referrer in the 2nd position
// so that new_val would be aligned on even registers if we were passing arguments in registers.
DEFINE_FUNCTION art_quick_set64_static
+ // TODO: Implement SETUP_GOT_NOSAVE for got_reg = ecx to avoid moving around the registers.
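+    // new_val arrives in edx:ebx; stash the high half in xmm0 because the frame
+    // setup below uses ebx as the GOT register.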
+ movd %ebx, %xmm0
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC
- mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ebx // get referrer
+ movd %xmm0, %ebx
+ mov FRAME_SIZE_REFS_ONLY_CALLEE_SAVE(%esp), %ecx // get referrer
subl LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass high half of new_val
- PUSH ecx // pass low half of new_val
- PUSH ebx // pass referrer
+ PUSH ebx // pass high half of new_val
+ PUSH edx // pass low half of new_val
+ PUSH ecx // pass referrer
PUSH eax // pass field_idx
call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*)
addl LITERAL(32), %esp // pop arguments
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 2f438a3c8f..1133203e31 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1383,7 +1383,7 @@ ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_O
// This is singled out as the argument order is different.
DEFINE_FUNCTION art_quick_set64_static
- movq %rsi, %rdx // pass new_val
+ // new_val is already in %rdx
movq 8(%rsp), %rsi // pass referrer
SETUP_REFS_ONLY_CALLEE_SAVE_FRAME
// field_idx is in rdi
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index 6f047974b3..adb5adae82 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -44,6 +44,76 @@ public class Main extends UnresolvedSuperClass {
super.superMethod();
}
+ /// CHECK-START: void Main.callUnresolvedStaticFieldAccess() register (before)
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimByte
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimChar
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimInt
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimLong
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimFloat
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimDouble
+ /// CHECK: UnresolvedStaticFieldSet field_type:PrimNot
+
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimByte
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimChar
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimInt
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimLong
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimFloat
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimDouble
+ /// CHECK: UnresolvedStaticFieldGet field_type:PrimNot
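+  // Stores one value of each type through the unresolved class, then reads each back.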
+  public static void callUnresolvedStaticFieldAccess() {
+ Object o = new Object();
+ UnresolvedClass.staticByte = (byte)1;
+ UnresolvedClass.staticChar = '1';
+ UnresolvedClass.staticInt = 123456789;
+    UnresolvedClass.staticLong = 123456789123456789L;
+ UnresolvedClass.staticFloat = 123456789123456789f;
+ UnresolvedClass.staticDouble = 123456789123456789d;
+ UnresolvedClass.staticObject = o;
+
+ expectEquals((byte)1, UnresolvedClass.staticByte);
+ expectEquals('1', UnresolvedClass.staticChar);
+ expectEquals(123456789, UnresolvedClass.staticInt);
+    expectEquals(123456789123456789L, UnresolvedClass.staticLong);
+ expectEquals(123456789123456789f, UnresolvedClass.staticFloat);
+ expectEquals(123456789123456789d, UnresolvedClass.staticDouble);
+ expectEquals(o, UnresolvedClass.staticObject);
+ }
+
+ /// CHECK-START: void Main.callUnresolvedInstanceFieldAccess(UnresolvedClass) register (before)
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimByte
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimChar
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimInt
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimLong
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimFloat
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimDouble
+ /// CHECK: UnresolvedInstanceFieldSet field_type:PrimNot
+
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimByte
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimChar
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimInt
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimLong
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimFloat
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimDouble
+ /// CHECK: UnresolvedInstanceFieldGet field_type:PrimNot
+  public static void callUnresolvedInstanceFieldAccess(UnresolvedClass c) {
+ Object o = new Object();
+ c.instanceByte = (byte)1;
+ c.instanceChar = '1';
+ c.instanceInt = 123456789;
+    c.instanceLong = 123456789123456789L;
+ c.instanceFloat = 123456789123456789f;
+ c.instanceDouble = 123456789123456789d;
+ c.instanceObject = o;
+
+ expectEquals((byte)1, c.instanceByte);
+ expectEquals('1', c.instanceChar);
+ expectEquals(123456789, c.instanceInt);
+    expectEquals(123456789123456789L, c.instanceLong);
+ expectEquals(123456789123456789f, c.instanceFloat);
+ expectEquals(123456789123456789d, c.instanceDouble);
+ expectEquals(o, c.instanceObject);
+ }
+
/// CHECK-START: void Main.main(java.lang.String[]) register (before)
/// CHECK: InvokeUnresolved invoke_type:direct
static public void main(String[] args) {
@@ -52,5 +122,49 @@ public class Main extends UnresolvedSuperClass {
callInvokeUnresolvedVirtual(c);
callInvokeUnresolvedInterface(c);
callInvokeUnresolvedSuper(new Main());
+ callUnresolvedStaticFieldAccess();
+ callUnresolvedInstanceFieldAccess(c);
+ }
+
+ public static void expectEquals(byte expected, byte result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(char expected, char result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(Object expected, Object result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
}
}
diff --git a/test/529-checker-unresolved/src/Unresolved.java b/test/529-checker-unresolved/src/Unresolved.java
index 5bf92dd331..03ceb6857b 100644
--- a/test/529-checker-unresolved/src/Unresolved.java
+++ b/test/529-checker-unresolved/src/Unresolved.java
@@ -40,6 +40,22 @@ class UnresolvedClass extends UnresolvedSuperClass implements UnresolvedInterfac
public void interfaceMethod() {
System.out.println("UnresolvedClass.interfaceMethod()");
}
+
+ public static byte staticByte;
+ public static char staticChar;
+ public static int staticInt;
+ public static long staticLong;
+ public static float staticFloat;
+ public static double staticDouble;
+ public static Object staticObject;
+
+ public byte instanceByte;
+ public char instanceChar;
+ public int instanceInt;
+ public long instanceLong;
+ public float instanceFloat;
+ public double instanceDouble;
+ public Object instanceObject;
}
final class UnresolvedFinalClass {