author      2015-09-29 04:52:17 +0100
committer   2015-10-02 02:25:18 +0100
commit      e460d1df1f789c7c8bb97024a8efbd713ac175e9 (patch)
tree        3511036fb18828dd0ee140d33a8bcd0535ebeab6 /compiler/optimizing
parent      25217af2a7cae96b32ba566aaf697288f3374c99 (diff)
Revert "Revert "Support unresolved fields in optimizing"
The CL also changes the calling convetion for 64bit static field set
to use kArg2 instead of kArg1. This allows optimizing to keep
the asumptions:
- arm pairs are always of form (even_reg, odd_reg)
- ecx_edx is not used as a register on x86.
This reverts commit e6f49b47b6a4dc9c7684e4483757872cfc7ff1a1.
Change-Id: I93159917565824084abc96775f31be1a4249f2f3
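Why the kArg1 -> kArg2 change matters: ARM passes a 64-bit value in two consecutive core registers, and optimizing assumes every such pair has the form (even_reg, odd_reg). A 64-bit set value starting at kArg1 (r1) would occupy (r1, r2) and break that invariant; starting at kArg2 (r2) yields (r2, r3). On x86 the same shift keeps the value out of the ecx_edx pair, which optimizing never uses. A minimal standalone sketch of the pair invariant (not ART code; the register enum and the check are illustrative assumptions):

    // Standalone sketch (not ART code): why a 64-bit set value starting at
    // kArg1 breaks the pair assumption the CL relies on. Register names
    // mirror the ARM convention described above; the enum is illustrative.
    #include <cassert>
    #include <cstdio>

    enum ArmCoreRegister { R0, R1, R2, R3 };

    // A 64-bit value passed in core registers occupies two consecutive registers.
    struct RegisterPair { ArmCoreRegister low, high; };

    // Optimizing assumes ARM pairs are always (even_reg, odd_reg).
    bool IsValidArmPair(RegisterPair p) {
      return (p.low % 2 == 0) && (p.high == p.low + 1);
    }

    int main() {
      RegisterPair from_arg1{R1, R2};  // 64-bit value starting at kArg1.
      RegisterPair from_arg2{R2, R3};  // 64-bit value starting at kArg2.
      std::printf("(r1, r2) valid pair: %d\n", IsValidArmPair(from_arg1));  // 0
      std::printf("(r2, r3) valid pair: %d\n", IsValidArmPair(from_arg2));  // 1
      assert(!IsValidArmPair(from_arg1) && IsValidArmPair(from_arg2));
      return 0;
    }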
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc                     |  95
-rw-r--r--  compiler/optimizing/code_generator.cc              | 124
-rw-r--r--  compiler/optimizing/code_generator.h               |  31
-rw-r--r--  compiler/optimizing/code_generator_arm.cc          |  95
-rw-r--r--  compiler/optimizing/code_generator_arm.h           |  35
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc        | 132
-rw-r--r--  compiler/optimizing/code_generator_arm64.h         |  36
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc       | 128
-rw-r--r--  compiler/optimizing/code_generator_mips64.h        |  31
-rw-r--r--  compiler/optimizing/code_generator_x86.cc          | 101
-rw-r--r--  compiler/optimizing/code_generator_x86.h           |  33
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc       |  81
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h        |  32
-rw-r--r--  compiler/optimizing/graph_visualizer.cc            |  16
-rw-r--r--  compiler/optimizing/nodes.h                        | 110
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h    |   4
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc  |  18
17 files changed, 1025 insertions, 77 deletions
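The heart of the change is visible in the code_generator.cc hunk below: when a field cannot be resolved at compile time, the builder now emits an HUnresolved*Field* instruction instead of abandoning the compilation, and each backend lowers that instruction to a runtime entrypoint call chosen from the field type, staticness, and access direction. The following compilable sketch models that dispatch; the simplified Type enum stands in for Primitive::Type and strings stand in for the QuickEntrypointEnum values, so it is an illustration of the selection logic, not ART code:

    // Illustrative sketch of the entrypoint dispatch performed by
    // CodeGenerator::GenerateUnresolvedFieldAccess in the diff below.
    #include <cstdio>
    #include <string>

    enum class Type { kBoolean, kByte, kShort, kChar, kInt, kFloat, kNot, kLong, kDouble };

    std::string SelectEntrypoint(Type type, bool is_instance, bool is_get) {
      // Gets use typed entrypoints (Boolean/Byte/Short/Char) so the result is
      // sign/zero-extended correctly; sets share the 8/16-bit entrypoints.
      // int/float use the 32-bit helpers and long/double the 64-bit ones, since
      // the runtime helpers traffic in integer registers regardless of type.
      std::string width;
      switch (type) {
        case Type::kBoolean: width = is_get ? "Boolean" : "8"; break;
        case Type::kByte:    width = is_get ? "Byte" : "8"; break;
        case Type::kShort:   width = is_get ? "Short" : "16"; break;
        case Type::kChar:    width = is_get ? "Char" : "16"; break;
        case Type::kInt:
        case Type::kFloat:   width = "32"; break;
        case Type::kNot:     width = "Obj"; break;
        case Type::kLong:
        case Type::kDouble:  width = "64"; break;
      }
      return std::string("kQuick") + (is_get ? "Get" : "Set") + width +
             (is_instance ? "Instance" : "Static");
    }

    int main() {
      // A double static set goes through the 64-bit integer entrypoint, which
      // is why codegen first moves the FP value into core registers.
      std::printf("%s\n",
                  SelectEntrypoint(Type::kDouble, /*is_instance=*/false,
                                   /*is_get=*/false).c_str());  // kQuickSet64Static
      return 0;
    }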
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index cb36f62235..7ef79ec111 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1186,6 +1186,12 @@ void HGraphBuilder::PotentiallySimplifyFakeString(uint16_t original_dex_register
   }
 }
 
+static Primitive::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
+  const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+  const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+  return Primitive::GetType(type[0]);
+}
+
 bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
                                              uint32_t dex_pc,
                                              bool is_put) {
@@ -1205,44 +1211,61 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
   ArtField* resolved_field =
       compiler_driver_->ComputeInstanceFieldInfo(field_index, dex_compilation_unit_, is_put, soa);
 
-  if (resolved_field == nullptr) {
-    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
-    return false;
-  }
-
-  Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
 
   HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot, dex_pc);
-  current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
+  HInstruction* null_check = new (arena_) HNullCheck(object, dex_pc);
+  current_block_->AddInstruction(null_check);
+
+  Primitive::Type field_type = (resolved_field == nullptr)
+      ? GetFieldAccessType(*dex_file_, field_index)
+      : resolved_field->GetTypeAsPrimitiveType();
   if (is_put) {
     Temporaries temps(graph_);
-    HInstruction* null_check = current_block_->GetLastInstruction();
     // We need one temporary for the null check.
     temps.Add(null_check);
     HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
-    current_block_->AddInstruction(new (arena_) HInstanceFieldSet(
-        null_check,
-        value,
-        field_type,
-        resolved_field->GetOffset(),
-        resolved_field->IsVolatile(),
-        field_index,
-        *dex_file_,
-        dex_compilation_unit_->GetDexCache(),
-        dex_pc));
+    HInstruction* field_set = nullptr;
+    if (resolved_field == nullptr) {
+      MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+      field_set = new (arena_) HUnresolvedInstanceFieldSet(null_check,
+                                                           value,
+                                                           field_type,
+                                                           field_index,
+                                                           dex_pc);
+    } else {
+      field_set = new (arena_) HInstanceFieldSet(null_check,
+                                                 value,
+                                                 field_type,
+                                                 resolved_field->GetOffset(),
+                                                 resolved_field->IsVolatile(),
+                                                 field_index,
+                                                 *dex_file_,
+                                                 dex_compilation_unit_->GetDexCache(),
+                                                 dex_pc);
+    }
+    current_block_->AddInstruction(field_set);
   } else {
-    current_block_->AddInstruction(new (arena_) HInstanceFieldGet(
-        current_block_->GetLastInstruction(),
-        field_type,
-        resolved_field->GetOffset(),
-        resolved_field->IsVolatile(),
-        field_index,
-        *dex_file_,
-        dex_compilation_unit_->GetDexCache(),
-        dex_pc));
-
-    UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+    HInstruction* field_get = nullptr;
+    if (resolved_field == nullptr) {
+      MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+      field_get = new (arena_) HUnresolvedInstanceFieldGet(null_check,
+                                                           field_type,
+                                                           field_index,
+                                                           dex_pc);
+    } else {
+      field_get = new (arena_) HInstanceFieldGet(null_check,
+                                                 field_type,
+                                                 resolved_field->GetOffset(),
+                                                 resolved_field->IsVolatile(),
+                                                 field_index,
+                                                 *dex_file_,
+                                                 dex_compilation_unit_->GetDexCache(),
+                                                 dex_pc);
+    }
+    current_block_->AddInstruction(field_get);
+    UpdateLocal(source_or_dest_reg, field_get, dex_pc);
   }
+
   return true;
 }
 
@@ -1299,8 +1322,18 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
       soa, dex_cache, class_loader, dex_compilation_unit_, field_index, true);
 
   if (resolved_field == nullptr) {
-    MaybeRecordStat(MethodCompilationStat::kNotCompiledUnresolvedField);
-    return false;
+    MaybeRecordStat(MethodCompilationStat::kUnresolvedField);
+    Primitive::Type field_type = GetFieldAccessType(*dex_file_, field_index);
+    if (is_put) {
+      HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
+      current_block_->AddInstruction(
+          new (arena_) HUnresolvedStaticFieldSet(value, field_type, field_index, dex_pc));
+    } else {
+      current_block_->AddInstruction(
+          new (arena_) HUnresolvedStaticFieldGet(field_type, field_index, dex_pc));
+      UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction(), dex_pc);
+    }
+    return true;
   }
 
   const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index be05691741..8254277f96 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -413,6 +413,130 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
   InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
 }
 
+void CodeGenerator::CreateUnresolvedFieldLocationSummary(
+    HInstruction* field_access,
+    Primitive::Type field_type,
+    const FieldAccessCallingConvention& calling_convention) {
+  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+      || field_access->IsUnresolvedInstanceFieldSet();
+  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+      || field_access->IsUnresolvedStaticFieldGet();
+
+  ArenaAllocator* allocator = field_access->GetBlock()->GetGraph()->GetArena();
+  LocationSummary* locations =
+      new (allocator) LocationSummary(field_access, LocationSummary::kCall);
+
+  locations->AddTemp(calling_convention.GetFieldIndexLocation());
+
+  if (is_instance) {
+    // Add the `this` object for instance field accesses.
+    locations->SetInAt(0, calling_convention.GetObjectLocation());
+  }
+
+  // Note that pSetXXStatic/pGetXXStatic always takes/returns an int or int64
+  // regardless of the the type. Because of that we forced to special case
+  // the access to floating point values.
+  if (is_get) {
+    if (Primitive::IsFloatingPointType(field_type)) {
+      // The return value will be stored in regular registers while register
+      // allocator expects it in a floating point register.
+      // Note We don't need to request additional temps because the return
+      // register(s) are already blocked due the call and they may overlap with
+      // the input or field index.
+      // The transfer between the two will be done at codegen level.
+      locations->SetOut(calling_convention.GetFpuLocation(field_type));
+    } else {
+      locations->SetOut(calling_convention.GetReturnLocation(field_type));
+    }
+  } else {
+    size_t set_index = is_instance ? 1 : 0;
+    if (Primitive::IsFloatingPointType(field_type)) {
+      // The set value comes from a float location while the calling convention
+      // expects it in a regular register location. Allocate a temp for it and
+      // make the transfer at codegen.
+      AddLocationAsTemp(calling_convention.GetSetValueLocation(field_type, is_instance), locations);
+      locations->SetInAt(set_index, calling_convention.GetFpuLocation(field_type));
+    } else {
+      locations->SetInAt(set_index,
+                         calling_convention.GetSetValueLocation(field_type, is_instance));
+    }
+  }
+}
+
+void CodeGenerator::GenerateUnresolvedFieldAccess(
+    HInstruction* field_access,
+    Primitive::Type field_type,
+    uint32_t field_index,
+    uint32_t dex_pc,
+    const FieldAccessCallingConvention& calling_convention) {
+  LocationSummary* locations = field_access->GetLocations();
+
+  MoveConstant(locations->GetTemp(0), field_index);
+
+  bool is_instance = field_access->IsUnresolvedInstanceFieldGet()
+      || field_access->IsUnresolvedInstanceFieldSet();
+  bool is_get = field_access->IsUnresolvedInstanceFieldGet()
+      || field_access->IsUnresolvedStaticFieldGet();
+
+  if (!is_get && Primitive::IsFloatingPointType(field_type)) {
+    // Copy the float value to be set into the calling convention register.
+    // Note that using directly the temp location is problematic as we don't
+    // support temp register pairs. To avoid boilerplate conversion code, use
+    // the location from the calling convention.
+    MoveLocation(calling_convention.GetSetValueLocation(field_type, is_instance),
+                 locations->InAt(is_instance ? 1 : 0),
+                 (Primitive::Is64BitType(field_type) ? Primitive::kPrimLong : Primitive::kPrimInt));
+  }
+
+  QuickEntrypointEnum entrypoint = kQuickSet8Static;  // Initialize to anything to avoid warnings.
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetBooleanInstance : kQuickSet8Instance)
+          : (is_get ? kQuickGetBooleanStatic : kQuickSet8Static);
+      break;
+    case Primitive::kPrimByte:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetByteInstance : kQuickSet8Instance)
+          : (is_get ? kQuickGetByteStatic : kQuickSet8Static);
+      break;
+    case Primitive::kPrimShort:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetShortInstance : kQuickSet16Instance)
+          : (is_get ? kQuickGetShortStatic : kQuickSet16Static);
+      break;
+    case Primitive::kPrimChar:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetCharInstance : kQuickSet16Instance)
+          : (is_get ? kQuickGetCharStatic : kQuickSet16Static);
+      break;
+    case Primitive::kPrimInt:
+    case Primitive::kPrimFloat:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGet32Instance : kQuickSet32Instance)
+          : (is_get ? kQuickGet32Static : kQuickSet32Static);
+      break;
+    case Primitive::kPrimNot:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGetObjInstance : kQuickSetObjInstance)
+          : (is_get ? kQuickGetObjStatic : kQuickSetObjStatic);
+      break;
+    case Primitive::kPrimLong:
+    case Primitive::kPrimDouble:
+      entrypoint = is_instance
+          ? (is_get ? kQuickGet64Instance : kQuickSet64Instance)
+          : (is_get ? kQuickGet64Static : kQuickSet64Static);
+      break;
+    default:
+      LOG(FATAL) << "Invalid type " << field_type;
+  }
+  InvokeRuntime(entrypoint, field_access, dex_pc, nullptr);
+
+  if (is_get && Primitive::IsFloatingPointType(field_type)) {
+    MoveLocation(locations->Out(), calling_convention.GetReturnLocation(field_type), field_type);
+  }
+}
+
 void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
   // The DCHECKS below check that a register is not specified twice in
   // the summary. The out location can overlap with an input, so we need
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5da0e59187..a3ebc43f11 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -143,6 +143,22 @@ class InvokeDexCallingConventionVisitor {
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
 };
 
+class FieldAccessCallingConvention {
+ public:
+  virtual Location GetObjectLocation() const = 0;
+  virtual Location GetFieldIndexLocation() const = 0;
+  virtual Location GetReturnLocation(Primitive::Type type) const = 0;
+  virtual Location GetSetValueLocation(Primitive::Type type, bool is_instance) const = 0;
+  virtual Location GetFpuLocation(Primitive::Type type) const = 0;
+  virtual ~FieldAccessCallingConvention() {}
+
+ protected:
+  FieldAccessCallingConvention() {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConvention);
+};
+
 class CodeGenerator {
  public:
   // Compiles the graph to executable instructions. Returns whether the compilation
@@ -177,6 +193,9 @@ class CodeGenerator {
   virtual void Bind(HBasicBlock* block) = 0;
   virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
   virtual void MoveConstant(Location destination, int32_t value) = 0;
+  virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0;
+  virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;
+
   virtual Assembler* GetAssembler() = 0;
   virtual const Assembler& GetAssembler() const = 0;
   virtual size_t GetWordSize() const = 0;
@@ -385,6 +404,18 @@ class CodeGenerator {
 
   void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
 
+  void CreateUnresolvedFieldLocationSummary(
+      HInstruction* field_access,
+      Primitive::Type field_type,
+      const FieldAccessCallingConvention& calling_convention);
+
+  void GenerateUnresolvedFieldAccess(
+      HInstruction* field_access,
+      Primitive::Type field_type,
+      uint32_t field_index,
+      uint32_t dex_pc,
+      const FieldAccessCallingConvention& calling_convention);
+
   void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
   DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }
 
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a7dbb53382..cf7f5f4e08 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -906,6 +906,10 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
                 Primitive::kPrimInt);
     } else if (source.IsFpuRegister()) {
       UNIMPLEMENTED(FATAL);
+    } else if (source.IsFpuRegisterPair()) {
+      __ vmovrrd(destination.AsRegisterPairLow<Register>(),
+                 destination.AsRegisterPairHigh<Register>(),
+                 FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()));
     } else {
       DCHECK(source.IsDoubleStackSlot());
       DCHECK(ExpectedPairLayout(destination));
@@ -917,6 +921,10 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
       __ LoadDFromOffset(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
                         SP,
                         source.GetStackIndex());
+    } else if (source.IsRegisterPair()) {
+      __ vmovdrr(FromLowSToD(destination.AsFpuRegisterPairLow<SRegister>()),
+                 source.AsRegisterPairLow<Register>(),
+                 source.AsRegisterPairHigh<Register>());
     } else {
       UNIMPLEMENTED(FATAL);
     }
@@ -1038,6 +1046,25 @@ void CodeGeneratorARM::MoveConstant(Location location, int32_t value) {
   __ LoadImmediate(location.AsRegister<Register>(), value);
 }
 
+void CodeGeneratorARM::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+  if (Primitive::Is64BitType(dst_type)) {
+    Move64(dst, src);
+  } else {
+    Move32(dst, src);
+  }
+}
+
+void CodeGeneratorARM::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else if (location.IsRegisterPair()) {
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                      HInstruction* instruction,
                                      uint32_t dex_pc,
@@ -3605,6 +3632,74 @@ void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARM calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
       ? LocationSummary::kCallOnSlowPath
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 111112e9b2..16d1d383b4 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -96,6 +96,38 @@ class InvokeDexCallingConventionVisitorARM : public InvokeDexCallingConventionVi
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM);
 };
 
+class FieldAccessCallingConventionARM : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionARM() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return Location::RegisterLocation(R1);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return Location::RegisterLocation(R0);
+  }
+  Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(R0, R1)
+        : Location::RegisterLocation(R0);
+  }
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(R2, R3)
+        : (is_instance
+            ? Location::RegisterLocation(R2)
+            : Location::RegisterLocation(R1));
+  }
+  Location GetFpuLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::FpuRegisterPairLocation(S0, S1)
+        : Location::FpuRegisterLocation(S0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM);
+};
+
 class ParallelMoveResolverARM : public ParallelMoveResolverWithSwap {
  public:
   ParallelMoveResolverARM(ArenaAllocator* allocator, CodeGeneratorARM* codegen)
@@ -225,6 +257,9 @@ class CodeGeneratorARM : public CodeGenerator {
   void Bind(HBasicBlock* block) OVERRIDE;
   void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
   size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 78ecfdec10..af5bbaae3d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -19,7 +19,6 @@
 #include "arch/arm64/instruction_set_features_arm64.h"
 #include "art_method.h"
 #include "code_generator_utils.h"
-#include "common_arm64.h"
 #include "compiled_method.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
@@ -666,7 +665,7 @@ void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
 void ParallelMoveResolverARM64::EmitMove(size_t index) {
   DCHECK_LT(index, moves_.size());
   MoveOperands* move = moves_[index];
-  codegen_->MoveLocation(move->GetDestination(), move->GetSource());
+  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), Primitive::kPrimVoid);
 }
 
 void CodeGeneratorARM64::GenerateFrameEntry() {
@@ -750,7 +749,9 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
   }
 
   if (instruction->IsCurrentMethod()) {
-    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
+    MoveLocation(location,
+                 Location::DoubleStackSlot(kCurrentMethodStackOffset),
+                 Primitive::kPrimVoid);
   } else if (locations != nullptr && locations->Out().Equals(location)) {
     return;
   } else if (instruction->IsIntConstant()
@@ -793,6 +794,14 @@ void CodeGeneratorARM64::MoveConstant(Location location, int32_t value) {
   __ Mov(RegisterFrom(location, Primitive::kPrimInt), value);
 }
 
+void CodeGeneratorARM64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
   Primitive::Type type = load->GetType();
 
@@ -943,7 +952,9 @@ static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
          (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
 }
 
-void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Primitive::Type type) {
+void CodeGeneratorARM64::MoveLocation(Location destination,
+                                      Location source,
+                                      Primitive::Type dst_type) {
   if (source.Equals(destination)) {
     return;
   }
@@ -952,7 +963,7 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
   // locations. When moving from and to a register, the argument type can be
   // used to generate 32bit instead of 64bit moves. In debug mode we also
   // checks the coherency of the locations and the type.
-  bool unspecified_type = (type == Primitive::kPrimVoid);
+  bool unspecified_type = (dst_type == Primitive::kPrimVoid);
 
   if (destination.IsRegister() || destination.IsFpuRegister()) {
     if (unspecified_type) {
@@ -962,30 +973,44 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
           || src_cst->IsFloatConstant()
           || src_cst->IsNullConstant()))) {
         // For stack slots and 32bit constants, a 64bit type is appropriate.
-        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+        dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
       } else {
         // If the source is a double stack slot or a 64bit constant, a 64bit
         // type is appropriate. Else the source is a register, and since the
         // type has not been specified, we chose a 64bit type to force a 64bit
         // move.
-        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+        dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
       }
     }
-    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
-           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
-    CPURegister dst = CPURegisterFrom(destination, type);
+    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+           (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
+    CPURegister dst = CPURegisterFrom(destination, dst_type);
     if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
       DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
       __ Ldr(dst, StackOperandFrom(source));
     } else if (source.IsConstant()) {
-      DCHECK(CoherentConstantAndType(source, type));
+      DCHECK(CoherentConstantAndType(source, dst_type));
       MoveConstant(dst, source.GetConstant());
+    } else if (source.IsRegister()) {
+      if (destination.IsRegister()) {
+        __ Mov(Register(dst), RegisterFrom(source, dst_type));
+      } else {
+        DCHECK(destination.IsFpuRegister());
+        Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+            ? Primitive::kPrimLong
+            : Primitive::kPrimInt;
+        __ Fmov(FPRegisterFrom(destination, dst_type), RegisterFrom(source, source_type));
+      }
     } else {
+      DCHECK(source.IsFpuRegister());
       if (destination.IsRegister()) {
-        __ Mov(Register(dst), RegisterFrom(source, type));
+        Primitive::Type source_type = Primitive::Is64BitType(dst_type)
+            ? Primitive::kPrimDouble
+            : Primitive::kPrimFloat;
+        __ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type));
       } else {
         DCHECK(destination.IsFpuRegister());
-        __ Fmov(FPRegister(dst), FPRegisterFrom(source, type));
+        __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
       }
     }
   } else {  // The destination is not a register. It must be a stack slot.
@@ -993,16 +1018,17 @@ void CodeGeneratorARM64::MoveLocation(Location destination, Location source, Pri
     if (source.IsRegister() || source.IsFpuRegister()) {
       if (unspecified_type) {
         if (source.IsRegister()) {
-          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+          dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
         } else {
-          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+          dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
         }
       }
-      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
-             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
-      __ Str(CPURegisterFrom(source, type), StackOperandFrom(destination));
+      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+             (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
+      __ Str(CPURegisterFrom(source, dst_type), StackOperandFrom(destination));
     } else if (source.IsConstant()) {
-      DCHECK(unspecified_type || CoherentConstantAndType(source, type)) << source << " " << type;
+      DCHECK(unspecified_type || CoherentConstantAndType(source, dst_type))
+          << source << " " << dst_type;
       UseScratchRegisterScope temps(GetVIXLAssembler());
       HConstant* src_cst = source.GetConstant();
       CPURegister temp;
@@ -3508,6 +3534,74 @@ void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruc
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderARM64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionARM64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
   new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
 }
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7178081bf8..a068b48797 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
 
 #include "code_generator.h"
+#include "common_arm64.h"
 #include "dex/compiler_enums.h"
 #include "driver/compiler_options.h"
 #include "nodes.h"
@@ -141,6 +142,34 @@ class InvokeDexCallingConventionVisitorARM64 : public InvokeDexCallingConvention
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorARM64);
 };
 
+class FieldAccessCallingConventionARM64 : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionARM64() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return helpers::LocationFrom(vixl::x1);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return helpers::LocationFrom(vixl::x0);
+  }
+  Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return helpers::LocationFrom(vixl::x0);
+  }
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? helpers::LocationFrom(vixl::x2)
+        : (is_instance
+            ? helpers::LocationFrom(vixl::x2)
+            : helpers::LocationFrom(vixl::x1));
+  }
+  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return helpers::LocationFrom(vixl::d0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionARM64);
+};
+
 class InstructionCodeGeneratorARM64 : public HGraphVisitor {
  public:
   InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
@@ -334,10 +363,9 @@ class CodeGeneratorARM64 : public CodeGenerator {
   // Code generation helpers.
   void MoveConstant(vixl::CPURegister destination, HConstant* constant);
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
-  // The type is optional. When specified it must be coherent with the
-  // locations, and is used for optimisation and debugging.
-  void MoveLocation(Location destination, Location source,
-                    Primitive::Type type = Primitive::kPrimVoid);
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
   void Load(Primitive::Type type, vixl::CPURegister dst, const vixl::MemOperand& src);
   void Store(Primitive::Type type, vixl::CPURegister rt, const vixl::MemOperand& dst);
   void LoadAcquire(HInstruction* instruction, vixl::CPURegister dst, const vixl::MemOperand& src);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index ad0a39c753..e95d283c1a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -617,7 +617,7 @@ void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
 
 void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                        Location source,
-                                       Primitive::Type type) {
+                                       Primitive::Type dst_type) {
   if (source.Equals(destination)) {
     return;
   }
@@ -625,7 +625,7 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
   // A valid move can always be inferred from the destination and source
   // locations. When moving from and to a register, the argument type can be
   // used to generate 32bit instead of 64bit moves.
-  bool unspecified_type = (type == Primitive::kPrimVoid);
+  bool unspecified_type = (dst_type == Primitive::kPrimVoid);
   DCHECK_EQ(unspecified_type, false);
 
   if (destination.IsRegister() || destination.IsFpuRegister()) {
@@ -636,21 +636,21 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
           || src_cst->IsFloatConstant()
           || src_cst->IsNullConstant()))) {
         // For stack slots and 32bit constants, a 64bit type is appropriate.
-        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
+        dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
       } else {
         // If the source is a double stack slot or a 64bit constant, a 64bit
         // type is appropriate. Else the source is a register, and since the
         // type has not been specified, we chose a 64bit type to force a 64bit
         // move.
-        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
+        dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
       }
     }
-    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
-           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
+    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
+           (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
     if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
       // Move to GPR/FPR from stack
       LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
-      if (Primitive::IsFloatingPointType(type)) {
+      if (Primitive::IsFloatingPointType(dst_type)) {
         __ LoadFpuFromOffset(load_type,
                              destination.AsFpuRegister<FpuRegister>(),
                              SP,
@@ -665,31 +665,47 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
    } else if (source.IsConstant()) {
      // Move to GPR/FPR from constant
      GpuRegister gpr = AT;
-      if (!Primitive::IsFloatingPointType(type)) {
+      if (!Primitive::IsFloatingPointType(dst_type)) {
        gpr = destination.AsRegister<GpuRegister>();
      }
-      if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
+      if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
        __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
      } else {
        __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
      }
-      if (type == Primitive::kPrimFloat) {
+      if (dst_type == Primitive::kPrimFloat) {
        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
-      } else if (type == Primitive::kPrimDouble) {
+      } else if (dst_type == Primitive::kPrimDouble) {
        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      }
-    } else {
+    } else if (source.IsRegister()) {
      if (destination.IsRegister()) {
        // Move to GPR from GPR
        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
      } else {
+        DCHECK(destination.IsFpuRegister());
+        if (Primitive::Is64BitType(dst_type)) {
+          __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+        } else {
+          __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
+        }
+      }
+    } else if (source.IsFpuRegister()) {
+      if (destination.IsFpuRegister()) {
        // Move to FPR from FPR
-        if (type == Primitive::kPrimFloat) {
+        if (dst_type == Primitive::kPrimFloat) {
          __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
-          DCHECK_EQ(type, Primitive::kPrimDouble);
+          DCHECK_EQ(dst_type, Primitive::kPrimDouble);
          __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
+      } else {
+        DCHECK(destination.IsRegister());
+        if (Primitive::Is64BitType(dst_type)) {
+          __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+        } else {
+          __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
+        }
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
@@ -697,13 +713,13 @@ void CodeGeneratorMIPS64::MoveLocation(Location destination,
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
-          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
+          dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
-          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
+          dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
-      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
-             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
+      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
+             (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
      // Move to stack from GPR/FPR
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (source.IsRegister()) {
@@ -861,6 +877,14 @@ void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
   __ LoadConst32(location.AsRegister<GpuRegister>(), value);
 }
 
+void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
   Primitive::Type type = load->GetType();
 
@@ -3118,6 +3142,74 @@ void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instru
   HandleFieldSet(instruction, instruction->GetFieldInfo());
 }
 
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionMIPS64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
   new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
 }
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 16461d6c04..5e8f9e7f30 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -106,6 +106,31 @@ class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, Fpu
   DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
 };
 
+class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionMIPS64() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return Location::RegisterLocation(A1);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return Location::RegisterLocation(A0);
+  }
+  Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return Location::RegisterLocation(A0);
+  }
+  Location GetSetValueLocation(
+      Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
+    return is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1);
+  }
+  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return Location::FpuRegisterLocation(F0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS64);
+};
+
 class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
  public:
   ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
@@ -280,11 +305,13 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
   void Finalize(CodeAllocator* allocator) OVERRIDE;
 
   // Code generation helpers.
-
-  void MoveLocation(Location destination, Location source, Primitive::Type type);
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
+  void SwapLocations(Location loc1, Location loc2, Primitive::Type type);
 
   // Generate code to invoke a runtime entry point.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3d97132d9b..5078456eb1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -827,7 +827,10 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
                 Location::RegisterLocation(destination.AsRegisterPairLow<Register>()),
                 Primitive::kPrimInt);
     } else if (source.IsFpuRegister()) {
-      LOG(FATAL) << "Unimplemented";
+      XmmRegister src_reg = source.AsFpuRegister<XmmRegister>();
+      __ movd(destination.AsRegisterPairLow<Register>(), src_reg);
+      __ psrlq(src_reg, Immediate(32));
+      __ movd(destination.AsRegisterPairHigh<Register>(), src_reg);
     } else {
       // No conflict possible, so just do the moves.
       DCHECK(source.IsDoubleStackSlot());
@@ -840,6 +843,15 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
       __ movaps(destination.AsFpuRegister<XmmRegister>(), source.AsFpuRegister<XmmRegister>());
     } else if (source.IsDoubleStackSlot()) {
       __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, source.GetStackIndex()));
+    } else if (source.IsRegisterPair()) {
+      size_t elem_size = Primitive::ComponentSize(Primitive::kPrimInt);
+      // Create stack space for 2 elements.
+      __ subl(ESP, Immediate(2 * elem_size));
+      __ movl(Address(ESP, 0), source.AsRegisterPairLow<Register>());
+      __ movl(Address(ESP, elem_size), source.AsRegisterPairHigh<Register>());
+      __ movsd(destination.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
+      // And remove the temporary stack space we allocated.
+      __ addl(ESP, Immediate(2 * elem_size));
     } else {
       LOG(FATAL) << "Unimplemented";
     }
@@ -966,6 +978,25 @@ void CodeGeneratorX86::MoveConstant(Location location, int32_t value) {
   __ movl(location.AsRegister<Register>(), Immediate(value));
 }
 
+void CodeGeneratorX86::MoveLocation(Location dst, Location src, Primitive::Type dst_type) {
+  if (Primitive::Is64BitType(dst_type)) {
+    Move64(dst, src);
+  } else {
+    Move32(dst, src);
+  }
+}
+
+void CodeGeneratorX86::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else if (location.IsRegisterPair()) {
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* successor) {
   DCHECK(!successor->IsExitBlock());
 
@@ -4085,6 +4116,74 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instr
   HandleFieldGet(instruction, instruction->GetFieldInfo());
 }
 
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionX86 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
       ? LocationSummary::kCallOnSlowPath
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 2c2fc65444..ae2d84f945 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -91,6 +91,36 @@ class InvokeDexCallingConventionVisitorX86 : public InvokeDexCallingConventionVi
   DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorX86);
 };
 
+class FieldAccessCallingConventionX86 : public FieldAccessCallingConvention {
+ public:
+  FieldAccessCallingConventionX86() {}
+
+  Location GetObjectLocation() const OVERRIDE {
+    return Location::RegisterLocation(ECX);
+  }
+  Location GetFieldIndexLocation() const OVERRIDE {
+    return Location::RegisterLocation(EAX);
+  }
+  Location GetReturnLocation(Primitive::Type type) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(EAX, EDX)
+        : Location::RegisterLocation(EAX);
+  }
+  Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE {
+    return Primitive::Is64BitType(type)
+        ? Location::RegisterPairLocation(EDX, EBX)
+        : (is_instance
+            ? Location::RegisterLocation(EDX)
+            : Location::RegisterLocation(ECX));
+  }
+  Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
+    return Location::FpuRegisterLocation(XMM0);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86);
+};
+
 class ParallelMoveResolverX86 : public ParallelMoveResolverWithSwap {
  public:
   ParallelMoveResolverX86(ArenaAllocator* allocator, CodeGeneratorX86* codegen)
@@ -228,6 +258,9 @@ class CodeGeneratorX86 : public CodeGenerator {
   void Bind(HBasicBlock* block) OVERRIDE;
   void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
   void MoveConstant(Location destination, int32_t value) OVERRIDE;
+  void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
+  void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
+
   size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
   size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 6ea6138668..791bb9e6aa 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -990,6 +990,19 @@ void CodeGeneratorX86_64::MoveConstant(Location location, int32_t value) {
   Load64BitValue(location.AsRegister<CpuRegister>(), static_cast<int64_t>(value));
 }
 
+void CodeGeneratorX86_64::MoveLocation(
+    Location dst, Location src, Primitive::Type dst_type ATTRIBUTE_UNUSED) {
+  Move(dst, src);
+}
+
+void CodeGeneratorX86_64::AddLocationAsTemp(Location location, LocationSummary* locations) {
+  if (location.IsRegister()) {
+    locations->AddTemp(location);
+  } else {
+    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
+  }
+}
+
 void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
   DCHECK(!successor->IsExitBlock());
 
@@ -3849,6 +3862,74 @@ void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instru
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldGet(
+    HUnresolvedInstanceFieldGet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedInstanceFieldSet(
+    HUnresolvedInstanceFieldSet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldGet(
+    HUnresolvedStaticFieldGet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
+void LocationsBuilderX86_64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->CreateUnresolvedFieldLocationSummary(
+      instruction, instruction->GetFieldType(), calling_convention);
+}
+
+void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldSet(
+    HUnresolvedStaticFieldSet* instruction) {
+  FieldAccessCallingConventionX86_64 calling_convention;
+  codegen_->GenerateUnresolvedFieldAccess(instruction,
+                                          instruction->GetFieldType(),
+                                          instruction->GetFieldIndex(),
+                                          instruction->GetDexPc(),
+                                          calling_convention);
+}
+
 void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
   LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
LocationSummary::kCallOnSlowPath diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index 197ce63847..ecc8630e6b 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -70,6 +70,35 @@ class InvokeDexCallingConvention : public CallingConvention<Register, FloatRegis DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention); }; +class FieldAccessCallingConventionX86_64 : public FieldAccessCallingConvention { + public: + FieldAccessCallingConventionX86_64() {} + + Location GetObjectLocation() const OVERRIDE { + return Location::RegisterLocation(RSI); + } + Location GetFieldIndexLocation() const OVERRIDE { + return Location::RegisterLocation(RDI); + } + Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + return Location::RegisterLocation(RAX); + } + Location GetSetValueLocation(Primitive::Type type, bool is_instance) const OVERRIDE { + return Primitive::Is64BitType(type) + ? Location::RegisterLocation(RDX) + : (is_instance + ? Location::RegisterLocation(RDX) + : Location::RegisterLocation(RSI)); + } + Location GetFpuLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { + return Location::FpuRegisterLocation(XMM0); + } + + private: + DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionX86_64); +}; + + class InvokeDexCallingConventionVisitorX86_64 : public InvokeDexCallingConventionVisitor { public: InvokeDexCallingConventionVisitorX86_64() {} @@ -215,6 +244,9 @@ class CodeGeneratorX86_64 : public CodeGenerator { void Bind(HBasicBlock* block) OVERRIDE; void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE; void MoveConstant(Location destination, int32_t value) OVERRIDE; + void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE; + void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE; + size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE; diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index 2c6c3b726a..7a83662696 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -398,6 +398,22 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { StartAttributeStream("intrinsic") << invoke->GetIntrinsic(); } + void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* field_access) OVERRIDE { + StartAttributeStream("field_type") << field_access->GetFieldType(); + } + + void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* field_access) OVERRIDE { + StartAttributeStream("field_type") << field_access->GetFieldType(); + } + + void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* field_access) OVERRIDE { + StartAttributeStream("field_type") << field_access->GetFieldType(); + } + + void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* field_access) OVERRIDE { + StartAttributeStream("field_type") << field_access->GetFieldType(); + } + void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE { StartAttributeStream("kind") << (try_boundary->IsEntry() ? 
"entry" : "exit"); } diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index d52f5927de..849f876f36 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -1067,6 +1067,10 @@ class HLoopInformationOutwardIterator : public ValueObject { M(Shr, BinaryOperation) \ M(StaticFieldGet, Instruction) \ M(StaticFieldSet, Instruction) \ + M(UnresolvedInstanceFieldGet, Instruction) \ + M(UnresolvedInstanceFieldSet, Instruction) \ + M(UnresolvedStaticFieldGet, Instruction) \ + M(UnresolvedStaticFieldSet, Instruction) \ M(StoreLocal, Instruction) \ M(Sub, BinaryOperation) \ M(SuspendCheck, Instruction) \ @@ -4735,6 +4739,112 @@ class HStaticFieldSet : public HTemplateInstruction<2> { DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet); }; +class HUnresolvedInstanceFieldGet : public HExpression<1> { + public: + HUnresolvedInstanceFieldGet(HInstruction* obj, + Primitive::Type field_type, + uint32_t field_index, + uint32_t dex_pc) + : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc), + field_index_(field_index) { + SetRawInputAt(0, obj); + } + + bool NeedsEnvironment() const OVERRIDE { return true; } + bool CanThrow() const OVERRIDE { return true; } + + Primitive::Type GetFieldType() const { return GetType(); } + uint32_t GetFieldIndex() const { return field_index_; } + + DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet); + + private: + const uint32_t field_index_; + + DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet); +}; + +class HUnresolvedInstanceFieldSet : public HTemplateInstruction<2> { + public: + HUnresolvedInstanceFieldSet(HInstruction* obj, + HInstruction* value, + Primitive::Type field_type, + uint32_t field_index, + uint32_t dex_pc) + : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc), + field_type_(field_type), + field_index_(field_index) { + DCHECK_EQ(field_type, value->GetType()); + SetRawInputAt(0, obj); + SetRawInputAt(1, value); + } + + bool NeedsEnvironment() const OVERRIDE { return true; } + bool CanThrow() const OVERRIDE { return true; } + + Primitive::Type GetFieldType() const { return field_type_; } + uint32_t GetFieldIndex() const { return field_index_; } + + DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet); + + private: + const Primitive::Type field_type_; + const uint32_t field_index_; + + DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet); +}; + +class HUnresolvedStaticFieldGet : public HExpression<0> { + public: + HUnresolvedStaticFieldGet(Primitive::Type field_type, + uint32_t field_index, + uint32_t dex_pc) + : HExpression(field_type, SideEffects::AllExceptGCDependency(), dex_pc), + field_index_(field_index) { + } + + bool NeedsEnvironment() const OVERRIDE { return true; } + bool CanThrow() const OVERRIDE { return true; } + + Primitive::Type GetFieldType() const { return GetType(); } + uint32_t GetFieldIndex() const { return field_index_; } + + DECLARE_INSTRUCTION(UnresolvedStaticFieldGet); + + private: + const uint32_t field_index_; + + DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet); +}; + +class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> { + public: + HUnresolvedStaticFieldSet(HInstruction* value, + Primitive::Type field_type, + uint32_t field_index, + uint32_t dex_pc) + : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc), + field_type_(field_type), + field_index_(field_index) { + DCHECK_EQ(field_type, value->GetType()); + SetRawInputAt(0, value); + } + + bool NeedsEnvironment() const OVERRIDE { return true; } + bool CanThrow() const OVERRIDE { return true; } + 
+class HUnresolvedStaticFieldSet : public HTemplateInstruction<1> {
+ public:
+  HUnresolvedStaticFieldSet(HInstruction* value,
+                            Primitive::Type field_type,
+                            uint32_t field_index,
+                            uint32_t dex_pc)
+      : HTemplateInstruction(SideEffects::AllExceptGCDependency(), dex_pc),
+        field_type_(field_type),
+        field_index_(field_index) {
+    DCHECK_EQ(field_type, value->GetType());
+    SetRawInputAt(0, value);
+  }
+
+  bool NeedsEnvironment() const OVERRIDE { return true; }
+  bool CanThrow() const OVERRIDE { return true; }
+
+  Primitive::Type GetFieldType() const { return field_type_; }
+  uint32_t GetFieldIndex() const { return field_index_; }
+
+  DECLARE_INSTRUCTION(UnresolvedStaticFieldSet);
+
+ private:
+  const Primitive::Type field_type_;
+  const uint32_t field_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldSet);
+};
+
 // Implement the move-exception DEX instruction.
 class HLoadException : public HExpression<0> {
  public:
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index c7701b70ad..f1d29700d9 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -34,6 +34,7 @@ enum MethodCompilationStat {
   kInstructionSimplifications,
   kInstructionSimplificationsArch,
   kUnresolvedMethod,
+  kUnresolvedField,
   kNotCompiledBranchOutsideMethodCode,
   kNotCompiledCannotBuildSSA,
   kNotCompiledCantAccesType,
@@ -45,7 +46,6 @@ enum MethodCompilationStat {
   kNotCompiledPathological,
   kNotCompiledSpaceFilter,
   kNotCompiledUnhandledInstruction,
-  kNotCompiledUnresolvedField,
   kNotCompiledUnsupportedIsa,
   kNotCompiledVerifyAtRuntime,
   kNotOptimizedDisabled,
@@ -104,6 +104,7 @@ class OptimizingCompilerStats {
       case kInstructionSimplifications: return "kInstructionSimplifications";
       case kInstructionSimplificationsArch: return "kInstructionSimplificationsArch";
       case kUnresolvedMethod : return "kUnresolvedMethod";
+      case kUnresolvedField : return "kUnresolvedField";
       case kNotCompiledBranchOutsideMethodCode: return "kNotCompiledBranchOutsideMethodCode";
       case kNotCompiledCannotBuildSSA : return "kNotCompiledCannotBuildSSA";
       case kNotCompiledCantAccesType : return "kNotCompiledCantAccesType";
@@ -115,7 +116,6 @@ class OptimizingCompilerStats {
       case kNotCompiledPathological : return "kNotCompiledPathological";
       case kNotCompiledSpaceFilter : return "kNotCompiledSpaceFilter";
       case kNotCompiledUnhandledInstruction : return "kNotCompiledUnhandledInstruction";
-      case kNotCompiledUnresolvedField : return "kNotCompiledUnresolvedField";
       case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
       case kNotCompiledVerifyAtRuntime : return "kNotCompiledVerifyAtRuntime";
       case kNotOptimizedDisabled : return "kNotOptimizedDisabled";
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index fe837e4545..d22f2540ad 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -52,6 +52,8 @@ class RTPVisitor : public HGraphDelegateVisitor {
   void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact);
   void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
   void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) OVERRIDE;
   void VisitInvoke(HInvoke* instr) OVERRIDE;
   void VisitArrayGet(HArrayGet* instr) OVERRIDE;
   void VisitCheckCast(HCheckCast* instr) OVERRIDE;
@@ -450,6 +452,22 @@ void RTPVisitor::VisitStaticFieldGet(HStaticFieldGet* instr) {
   UpdateFieldAccessTypeInfo(instr, instr->GetFieldInfo());
 }
 
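(Annotation, not part of the CL: the two visitors that follow implement a deliberately conservative policy. With the field unresolved its declared class is unknown, so the only safe static type for a reference-typed get is an inexact java.lang.Object; the TODO notes that the field's type descriptor could later yield something more precise. A standalone restatement of the rule, with types and names that are mine, not ART's.)

#include <cassert>

enum class PrimType { kInt, kNot /* reference */ };

struct RefTypeInfo {
  bool valid = false;     // Only reference-typed values carry type info.
  bool is_exact = false;  // false: value may be any subclass of Object.
};

// Unresolved get: primitives get no reference type info; references are
// typed as inexact java.lang.Object until the descriptor is consulted.
RefTypeInfo TypeUnresolvedGet(PrimType field_type) {
  RefTypeInfo info;
  if (field_type == PrimType::kNot) {
    info.valid = true;
    info.is_exact = false;
  }
  return info;
}

int main() {
  assert(!TypeUnresolvedGet(PrimType::kInt).valid);
  assert(TypeUnresolvedGet(PrimType::kNot).valid);
  assert(!TypeUnresolvedGet(PrimType::kNot).is_exact);
  return 0;
}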
+void RTPVisitor::VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) {
+  // TODO: Use descriptor to get the actual type.
+  if (instr->GetFieldType() == Primitive::kPrimNot) {
+    instr->SetReferenceTypeInfo(
+        ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+  }
+}
+
+void RTPVisitor::VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instr) {
+  // TODO: Use descriptor to get the actual type.
+  if (instr->GetFieldType() == Primitive::kPrimNot) {
+    instr->SetReferenceTypeInfo(
+        ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false));
+  }
+}
+
 void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
   ScopedObjectAccess soa(Thread::Current());
   mirror::DexCache* dex_cache =