Diffstat (limited to 'compiler')
25 files changed, 94 insertions, 34 deletions
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 36f1be7bdf..651fa66b9d 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -879,7 +879,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
       new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapLiveIn);

   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-    uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+    uint64_t df_attributes = GetDataFlowAttributes(mir);
     DecodedInstruction *d_insn = &mir->dalvikInsn;

     if (df_attributes & DF_HAS_USES) {
@@ -994,7 +994,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
         static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
                                                               kArenaAllocDFInfo));

-    uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+    uint64_t df_attributes = GetDataFlowAttributes(mir);

     // If not a pseudo-op, note non-leaf or can throw
     if (static_cast<int>(mir->dalvikInsn.opcode) <
@@ -1252,7 +1252,7 @@ void MIRGraph::CountUses(struct BasicBlock* bb) {
        use_counts_.Put(s_reg, use_counts_.Get(s_reg) + weight);
      }
      if (!(cu_->disable_opt & (1 << kPromoteCompilerTemps))) {
-       uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+       uint64_t df_attributes = GetDataFlowAttributes(mir);
        // Implicit use of Method* ? */
        if (df_attributes & DF_UMS) {
          /*
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 8ce4f1ff9a..6857edbbe7 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -621,7 +621,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
     int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
     int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);

-    uint64_t df_flags = oat_data_flow_attributes_[insn->dalvikInsn.opcode];
+    uint64_t df_flags = GetDataFlowAttributes(insn);
     merged_df_flags |= df_flags;

     if (df_flags & DF_HAS_DEFS) {
@@ -743,6 +743,17 @@ void MIRGraph::ShowOpcodeStats() {
   }
 }

+uint64_t MIRGraph::GetDataFlowAttributes(Instruction::Code opcode) {
+  DCHECK_LT((size_t) opcode, (sizeof(oat_data_flow_attributes_) / sizeof(oat_data_flow_attributes_[0])));
+  return oat_data_flow_attributes_[opcode];
+}
+
+uint64_t MIRGraph::GetDataFlowAttributes(MIR* mir) {
+  DCHECK(mir != nullptr);
+  Instruction::Code opcode = mir->dalvikInsn.opcode;
+  return GetDataFlowAttributes(opcode);
+}
+
 // TODO: use a configurable base prefix, and adjust callers to supply pass name.
 /* Dump the CFG into a DOT graph */
 void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suffix) {
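The refactor above funnels every lookup of the per-opcode data-flow table through a bounds-checked accessor instead of raw array indexing. A minimal self-contained sketch of the same pattern, with placeholder opcodes and assert() standing in for ART's DCHECK macros:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Sketch of the accessor pattern introduced above: the attribute table
    // becomes private and is reachable only through a checked, static
    // accessor. The enum and table contents are placeholders.
    enum Code { kOpNop, kOpMove, kMirOpLast };

    class MIRGraphSketch {
     public:
      static uint64_t GetDataFlowAttributes(Code opcode) {
        // Reject out-of-range opcodes before indexing, as the DCHECK_LT does.
        assert(static_cast<size_t>(opcode) <
               sizeof(oat_data_flow_attributes_) / sizeof(oat_data_flow_attributes_[0]));
        return oat_data_flow_attributes_[opcode];
      }

     private:
      static const uint64_t oat_data_flow_attributes_[kMirOpLast];
    };

    const uint64_t MIRGraphSketch::oat_data_flow_attributes_[kMirOpLast] = {0x0, 0x1};

Because the accessor is static, call sites that previously reached through an instance (mir_graph_->oat_data_flow_attributes_[opcode]) and the gtest code below can both call MIRGraph::GetDataFlowAttributes(opcode) directly.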
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 2c125f6aa0..5997e5b5bd 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -851,6 +851,9 @@ class MIRGraph {
    */
   void CountUses(struct BasicBlock* bb);

+  static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
+  static uint64_t GetDataFlowAttributes(MIR* mir);
+
   /**
    * @brief Combine BasicBlocks
    * @param the BasicBlock we are considering
@@ -868,7 +871,6 @@ class MIRGraph {
   RegLocation* reg_location_;                         // Map SSA names to location.
   SafeMap<unsigned int, unsigned int> block_id_map_;  // Block collapse lookup cache.

-  static const uint64_t oat_data_flow_attributes_[kMirOpLast];
   static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
   static const uint32_t analysis_attributes_[kMirOpLast];

@@ -985,6 +987,7 @@ class MIRGraph {
   GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
   GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
   GrowableArray<MirMethodLoweringInfo> method_lowering_infos_;
+  static const uint64_t oat_data_flow_attributes_[kMirOpLast];

   friend class ClassInitCheckEliminationTest;
   friend class LocalValueNumberingTest;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 937e2585ef..72c46cc975 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -43,11 +43,11 @@ void MIRGraph::DoConstantPropagation(BasicBlock* bb) {

   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     // Skip pass if BB has MIR without SSA representation.
-    if (mir->ssa_rep == NULL) {
+    if (mir->ssa_rep == nullptr) {
       return;
     }

-    uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+    uint64_t df_attributes = GetDataFlowAttributes(mir);

     DecodedInstruction *d_insn = &mir->dalvikInsn;
@@ -559,7 +559,7 @@ void MIRGraph::CountChecks(struct BasicBlock* bb) {
       if (mir->ssa_rep == NULL) {
         continue;
       }
-      uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+      uint64_t df_attributes = GetDataFlowAttributes(mir);
       if (df_attributes & DF_HAS_NULL_CHKS) {
         checkstats_->null_checks++;
         if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
@@ -644,7 +644,7 @@ void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
     MIR* mir = bb->last_mir_insn;
     // Grab the attributes from the paired opcode
     MIR* throw_insn = mir->meta.throw_insn;
-    uint64_t df_attributes = oat_data_flow_attributes_[throw_insn->dalvikInsn.opcode];
+    uint64_t df_attributes = GetDataFlowAttributes(throw_insn);
     bool can_combine = true;
     if (df_attributes & DF_HAS_NULL_CHKS) {
       can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
@@ -796,7 +796,7 @@ bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
       continue;
     }

-    uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+    uint64_t df_attributes = GetDataFlowAttributes(mir);

     // Might need a null check?
     if (df_attributes & DF_HAS_NULL_CHKS) {
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 40ced70948..891d9fb7ea 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -172,7 +172,7 @@ class ClassInitCheckEliminationTest : public testing::Test {
       mir->offset = 2 * i;  // All insns need to be at least 2 code units long.
       mir->width = 2u;
       mir->optimization_flags = 0u;
-      merged_df_flags |= MIRGraph::oat_data_flow_attributes_[def->opcode];
+      merged_df_flags |= MIRGraph::GetDataFlowAttributes(def->opcode);
     }
     cu_.mir_graph->merged_df_flags_ = merged_df_flags;
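The mir_optimization.cc hunks above all follow the same shape: a pass reads the opcode's DF attribute bits and branches on them. A rough sketch of the check-counting logic in CountChecks; the flag values and stats struct are illustrative, not ART's real encodings:

    #include <cstdint>

    constexpr uint64_t kDfHasNullChks  = UINT64_C(1) << 0;  // placeholder bit positions
    constexpr uint64_t kDfHasRangeChks = UINT64_C(1) << 1;

    struct CheckStats {
      int null_checks = 0;
      int range_checks = 0;
    };

    // Each MIR contributes to the statistics only if its opcode carries the
    // corresponding "has checks" attribute bit.
    void CountChecksSketch(uint64_t df_attributes, CheckStats* stats) {
      if (df_attributes & kDfHasNullChks) {
        stats->null_checks++;  // may later be suppressed via MIR_IGNORE_NULL_CHECK
      }
      if (df_attributes & kDfHasRangeChks) {
        stats->range_checks++;
      }
    }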
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 70438ecd50..576e2424fa 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -722,7 +722,7 @@ bool MirConverter::ConvertMIRNode(MIR* mir, BasicBlock* bb,
   /* Prep Src and Dest locations */
   int next_sreg = 0;
   int next_loc = 0;
-  uint64_t attrs = mir_graph_->oat_data_flow_attributes_[opcode];
+  uint64_t attrs = MIRGraph::GetDataFlowAttributes(opcode);
   rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
   if (attrs & DF_UA) {
     if (attrs & DF_A_WIDE) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index b030bb4ec8..0596d4fff0 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1018,8 +1018,8 @@ CompiledMethod* Mir2Lir::GetCompiledMethod() {
       vmap_encoder.PushBackUnsigned(fp_vmap_table_[i] + VmapTable::kEntryAdjustment);
     }
   } else {
-    DCHECK_EQ(__builtin_popcount(core_spill_mask_), 0);
-    DCHECK_EQ(__builtin_popcount(fp_spill_mask_), 0);
+    DCHECK_EQ(POPCOUNT(core_spill_mask_), 0);
+    DCHECK_EQ(POPCOUNT(fp_spill_mask_), 0);
     DCHECK_EQ(core_vmap_table_.size(), 0u);
     DCHECK_EQ(fp_vmap_table_.size(), 0u);
     vmap_encoder.PushBackUnsigned(0u);  // Size is 0.
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 8b9a686ece..3cc2ba0042 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -330,9 +330,10 @@ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
     bool is_type_initialized;  // Ignored as an array does not have an initializer.
     bool use_direct_type_ptr;
     uintptr_t direct_type_ptr;
+    bool is_finalizable;
     if (kEmbedClassInCode &&
-        driver->CanEmbedTypeInCode(*dex_file, type_idx,
-                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
+        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
+                                   &direct_type_ptr, &is_finalizable)) {
       // The fast path.
       if (!use_direct_type_ptr) {
         LoadClassType(type_idx, kArg0);
@@ -980,9 +981,11 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
     bool is_type_initialized;
     bool use_direct_type_ptr;
     uintptr_t direct_type_ptr;
+    bool is_finalizable;
     if (kEmbedClassInCode &&
-        driver->CanEmbedTypeInCode(*dex_file, type_idx,
-                                   &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) {
+        driver->CanEmbedTypeInCode(*dex_file, type_idx, &is_type_initialized, &use_direct_type_ptr,
+                                   &direct_type_ptr, &is_finalizable) &&
+        !is_finalizable) {
       // The fast path.
       if (!use_direct_type_ptr) {
         LoadClassType(type_idx, kArg0);
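The gen_common.cc change is the behavioral heart of this commit: GenNewInstance now refuses the inlined fast allocation path for finalizable classes, while GenNewArray merely ignores the new flag since arrays are never finalizable. A standalone sketch of the decision, on the assumption that finalizable objects must reach the runtime's slow-path entrypoint so they can be registered for finalization; the types are illustrative stand-ins for the driver query:

    // Stand-in for the information CanEmbedTypeInCode reports about a class.
    struct ClassQuery {
      bool embeddable;      // class resolved and its pointer can be embedded
      bool is_finalizable;  // class or a superclass declares a finalizer
    };

    enum class AllocPath { kFastInlined, kSlowRuntimeCall };

    AllocPath ChooseAllocPath(const ClassQuery& cls) {
      if (cls.embeddable && !cls.is_finalizable) {
        return AllocPath::kFastInlined;  // no finalizer bookkeeping needed
      }
      // Unresolved or finalizable: let the runtime entrypoint handle it.
      return AllocPath::kSlowRuntimeCall;
    }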
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index df7a7c1077..107987ef0d 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -42,7 +42,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {
   RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
       RegStorage::InvalidReg();

-  int offset = StackVisitor::GetOutVROffset(in_position);
+  int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
   if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     /*
      * When doing a call for x86, it moves the stack pointer in order to push return.
@@ -81,7 +81,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {
 }

 void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
-  int offset = StackVisitor::GetOutVROffset(in_position);
+  int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
   if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     /*
      * When doing a call for x86, it moves the stack pointer in order to push return.
@@ -286,7 +286,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
   // Prep Src and Dest locations.
   int next_sreg = 0;
   int next_loc = 0;
-  uint64_t attrs = mir_graph_->oat_data_flow_attributes_[opcode];
+  uint64_t attrs = MIRGraph::GetDataFlowAttributes(opcode);
   rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
   if (attrs & DF_UA) {
     if (attrs & DF_A_WIDE) {
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 39783a21ea..6455572470 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1141,7 +1141,8 @@ void Mir2Lir::DoPromotion() {
 /* Returns sp-relative offset in bytes for a VReg */
 int Mir2Lir::VRegOffset(int v_reg) {
   return StackVisitor::GetVRegOffset(cu_->code_item, core_spill_mask_,
-                                     fp_spill_mask_, frame_size_, v_reg);
+                                     fp_spill_mask_, frame_size_, v_reg,
+                                     cu_->instruction_set);
 }

 /* Returns sp-relative offset in bytes for a SReg */
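StackVisitor::GetOutVROffset and GetVRegOffset now take the target instruction set, making frame offsets explicitly ISA-dependent. The comment in LoadArg hints at why: on x86 the call instruction itself pushes a return address, shifting every sp-relative offset. A hypothetical sketch of such a computation; the constants and adjustment are illustrative, not ART's actual frame layout:

    enum class Isa { kArm, kMips, kX86 };

    // Byte offset of the out-argument slot at `out_position`, relative to sp.
    int OutVROffsetSketch(int out_position, Isa isa) {
      constexpr int kWordBytes = 4;
      int offset = out_position * kWordBytes;
      if (isa == Isa::kX86) {
        offset += kWordBytes;  // skip the return address pushed by `call`
      }
      return offset;
    }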
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 4d45055927..b972d0885d 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -889,7 +889,7 @@ void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {

 void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
   // Look at all the uses, and see if they are double constants.
-  uint64_t attrs = mir_graph_->oat_data_flow_attributes_[opcode];
+  uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));

   int next_sreg = 0;
   if (attrs & DF_UA) {
     if (attrs & DF_A_WIDE) {
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 4be0f59071..d5c2598356 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -124,7 +124,7 @@ bool MIRGraph::SetHigh(int index) {
 bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
   SSARepresentation *ssa_rep = mir->ssa_rep;
   if (ssa_rep) {
-    uint64_t attrs = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
+    uint64_t attrs = GetDataFlowAttributes(mir);
     const int* uses = ssa_rep->uses;
     const int* defs = ssa_rep->defs;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 0ad30be3fe..bde0fae83f 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -905,13 +905,14 @@ bool CompilerDriver::CanAccessInstantiableTypeWithoutChecks(uint32_t referrer_id
 bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
                                         bool* is_type_initialized, bool* use_direct_type_ptr,
-                                        uintptr_t* direct_type_ptr) {
+                                        uintptr_t* direct_type_ptr, bool* out_is_finalizable) {
   ScopedObjectAccess soa(Thread::Current());
   mirror::DexCache* dex_cache = Runtime::Current()->GetClassLinker()->FindDexCache(dex_file);
   mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx);
   if (resolved_class == nullptr) {
     return false;
   }
+  *out_is_finalizable = resolved_class->IsFinalizable();
   const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
   if (compiling_boot) {
     // boot -> boot class pointers.
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index d7d40d554a..6ac9cf751a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -210,7 +210,7 @@ class CompilerDriver {
   bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
                           bool* is_type_initialized, bool* use_direct_type_ptr,
-                          uintptr_t* direct_type_ptr);
+                          uintptr_t* direct_type_ptr, bool* out_is_finalizable);

   // Get the DexCache for the
   mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index e6d983f583..62817e7e2c 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -587,7 +587,7 @@ class FixupVisitor {

   void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-    Object* ref = obj->GetFieldObject<Object, kVerifyNone, false>(offset);
+    Object* ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
     // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
     // image.
     copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
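One subtlety of the compiler_driver.cc change is worth noting: *out_is_finalizable is written only after the unresolved-class early return, so the flag is meaningful only when CanEmbedTypeInCode returns true. A distilled sketch of that contract, with ResolvedClass as a stand-in for mirror::Class:

    struct ResolvedClass {
      bool is_finalizable;
    };

    // Returns false without touching *out_is_finalizable when the class is
    // unresolved; callers must read the flag only on a true return, which is
    // exactly what the short-circuit `&& !is_finalizable` in GenNewInstance
    // relies on.
    bool CanEmbedTypeSketch(const ResolvedClass* cls, bool* out_is_finalizable) {
      if (cls == nullptr) {
        return false;
      }
      *out_is_finalizable = cls->is_finalizable;
      return true;
    }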
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index 00a239b80d..604ce1c821 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -71,6 +71,11 @@ class ArmJniCallingConvention FINAL : public JniCallingConvention {
   ManagedRegister CurrentParamRegister() OVERRIDE;
   FrameOffset CurrentParamStackOffset() OVERRIDE;

+  // AAPCS mandates return values are extended.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return false;
+  }
+
  protected:
   size_t NumberOfOutgoingStackArgs() OVERRIDE;
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 92f547c533..9fd3265c86 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -68,6 +68,11 @@ class Arm64JniCallingConvention FINAL : public JniCallingConvention {
   ManagedRegister CurrentParamRegister() OVERRIDE;
   FrameOffset CurrentParamStackOffset() OVERRIDE;

+  // aarch64 calling convention leaves upper bits undefined.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return true;
+  }
+
  protected:
   size_t NumberOfOutgoingStackArgs() OVERRIDE;
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 4d25d1ce96..18afd5817f 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -287,6 +287,8 @@ class JniCallingConvention : public CallingConvention {
   FrameOffset ReturnValueSaveLocation() const;
   // Register that holds result if it is integer.
   virtual ManagedRegister IntReturnRegister() = 0;
+  // Whether the compiler needs to ensure zero-/sign-extension of a small result type
+  virtual bool RequiresSmallResultTypeExtension() const = 0;

   // Callee save registers to spill prior to native code (which may clobber)
   virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const = 0;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 93b1b5a155..9f439eb991 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -314,7 +314,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
                               mr_conv->InterproceduralScratchRegister());

   // 10. Fix differences in result widths.
-  if (instruction_set == kX86 || instruction_set == kX86_64) {
+  if (main_jni_conv->RequiresSmallResultTypeExtension()) {
     if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
         main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
       __ SignExtend(main_jni_conv->ReturnRegister(),
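The jni_compiler.cc hunk replaces a hard-coded ISA test with a query on the calling-convention object, so each backend states its own ABI rule (the headers in this diff supply the answers: false for ARM and MIPS, true for arm64, x86, and x86-64). A compilable sketch of the dispatch, with simplified stand-ins for the JniCallingConvention hierarchy:

    // Base class: each ISA's calling convention answers whether the managed
    // caller must widen byte/short/char results returned from native code.
    struct JniConvSketch {
      virtual ~JniConvSketch() {}
      virtual bool RequiresSmallResultTypeExtension() const = 0;
    };

    struct ArmConvSketch : JniConvSketch {
      // AAPCS: the callee already extends small return values.
      bool RequiresSmallResultTypeExtension() const override { return false; }
    };

    struct Arm64ConvSketch : JniConvSketch {
      // AArch64 leaves the upper bits of small results undefined.
      bool RequiresSmallResultTypeExtension() const override { return true; }
    };

    void FixResultWidthSketch(const JniConvSketch& conv) {
      if (conv.RequiresSmallResultTypeExtension()) {
        // Here the real compiler emits SignExtend()/ZeroExtend() depending on
        // whether the return type is byte/short (signed) or boolean/char.
      }
    }

This also explains the arm64 assembler change at the end of the diff: with the hook returning true for AArch64, SignExtend and ZeroExtend can no longer be left UNIMPLEMENTED there.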
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index e33fbade55..8d82dceef4 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -71,6 +71,11 @@ class MipsJniCallingConvention FINAL : public JniCallingConvention {
   ManagedRegister CurrentParamRegister() OVERRIDE;
   FrameOffset CurrentParamStackOffset() OVERRIDE;

+  // Mips does not need to extend small return types.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return false;
+  }
+
  protected:
   size_t NumberOfOutgoingStackArgs() OVERRIDE;
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index 5b9069c26a..025eb6d40e 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -69,6 +69,11 @@ class X86JniCallingConvention FINAL : public JniCallingConvention {
   ManagedRegister CurrentParamRegister() OVERRIDE;
   FrameOffset CurrentParamStackOffset() OVERRIDE;

+  // x86 needs to extend small return types.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return true;
+  }
+
  protected:
   size_t NumberOfOutgoingStackArgs() OVERRIDE;
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index d545774689..1ba5353289 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -69,6 +69,11 @@ class X86_64JniCallingConvention FINAL : public JniCallingConvention {
   ManagedRegister CurrentParamRegister() OVERRIDE;
   FrameOffset CurrentParamStackOffset() OVERRIDE;

+  // x86-64 needs to extend small return types.
+  bool RequiresSmallResultTypeExtension() const OVERRIDE {
+    return true;
+  }
+
  protected:
   size_t NumberOfOutgoingStackArgs() OVERRIDE;
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index effc38e4c4..5c839dd54e 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1107,7 +1107,7 @@ int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) {
   // The offset is off by 8 due to the way the ARM CPUs read PC.
   offset -= 8;
   CHECK_ALIGNED(offset, 4);
-  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+  CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;

   // Properly preserve only the bits supported in the instruction.
   offset >>= 2;
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 1d87eaaa60..b4bb979d42 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -467,12 +467,26 @@ void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
 #endif
 }

-void Arm64Assembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "no sign extension necessary for Arm64";
+void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
+  Arm64ManagedRegister reg = mreg.AsArm64();
+  CHECK(size == 1 || size == 2) << size;
+  CHECK(reg.IsWRegister()) << reg;
+  if (size == 1) {
+    ___ sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+  } else {
+    ___ sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+  }
 }

-void Arm64Assembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "no zero extension necessary for Arm64";
+void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+  Arm64ManagedRegister reg = mreg.AsArm64();
+  CHECK(size == 1 || size == 2) << size;
+  CHECK(reg.IsWRegister()) << reg;
+  if (size == 1) {
+    ___ uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+  } else {
+    ___ uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+  }
 }

 void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 45d3a97ac1..9001f8a41f 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -123,7 +123,7 @@ void MipsAssembler::EmitJump(Label* label, bool link) {
 int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
   CHECK_ALIGNED(offset, 4);
-  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+  CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;

   // Properly preserve only the bits supported in the instruction.
   offset >>= 2;
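Finally, the two EncodeBranchOffset hunks swap CountOneBits/__builtin_popcount for the shared POPCOUNT utility: the popcount of the instruction's offset-field mask gives the number of bits available, and IsInt checks that the offset fits in that many signed bits. A self-contained sketch of that range check, under the assumption that IsInt has these semantics; the local helpers stand in for ART's utils:

    #include <cstdint>

    constexpr int PopcountSketch(uint32_t x) {
      return __builtin_popcount(x);  // GCC/Clang builtin; ART wraps this as POPCOUNT
    }

    constexpr bool IsIntSketch(int num_bits, int32_t value) {
      // True when `value` is representable as a signed `num_bits`-bit integer.
      return -(INT64_C(1) << (num_bits - 1)) <= value &&
             value < (INT64_C(1) << (num_bits - 1));
    }

    constexpr uint32_t kBranchOffsetMaskSketch = 0x00ffffff;  // e.g. a 24-bit field

    bool BranchOffsetFits(int32_t offset) {
      return IsIntSketch(PopcountSketch(kBranchOffsetMaskSketch), offset);
    }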