diff options
Diffstat (limited to 'compiler/optimizing')
| -rw-r--r-- | compiler/optimizing/code_generator_arm64.h | 6 |
| -rw-r--r-- | compiler/optimizing/intrinsics_arm64.cc | 11 |
2 files changed, 8 insertions, 9 deletions
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index 240936c176..1b5fa857e7 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -243,7 +243,7 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator { } Arm64Assembler* GetAssembler() const { return assembler_; } - vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; } + vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); } private: void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path, @@ -364,7 +364,7 @@ class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap { private: Arm64Assembler* GetAssembler() const; vixl::aarch64::MacroAssembler* GetVIXLAssembler() const { - return GetAssembler()->vixl_masm_; + return GetAssembler()->GetVIXLAssembler(); } CodeGeneratorARM64* const codegen_; @@ -413,7 +413,7 @@ class CodeGeneratorARM64 : public CodeGenerator { HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; } Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; } const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; } - vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; } + vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); } // Emit a write barrier. 
void MarkGCCard(vixl::aarch64::Register object, diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 06d1148652..e3a9d27a53 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -26,7 +26,6 @@ #include "mirror/string.h" #include "thread.h" #include "utils/arm64/assembler_arm64.h" -#include "utils/arm64/constants_arm64.h" using namespace vixl::aarch64; // NOLINT(build/namespaces) @@ -62,14 +61,14 @@ ALWAYS_INLINE inline MemOperand AbsoluteHeapOperandFrom(Location location, size_ } // namespace MacroAssembler* IntrinsicCodeGeneratorARM64::GetVIXLAssembler() { - return codegen_->GetAssembler()->vixl_masm_; + return codegen_->GetVIXLAssembler(); } ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() { return codegen_->GetGraph()->GetArena(); } -#define __ codegen->GetAssembler()->vixl_masm_-> +#define __ codegen->GetVIXLAssembler()-> static void MoveFromReturnRegister(Location trg, Primitive::Type type, @@ -782,7 +781,7 @@ static void GenUnsafeGet(HInvoke* invoke, DCHECK((type == Primitive::kPrimInt) || (type == Primitive::kPrimLong) || (type == Primitive::kPrimNot)); - MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_; + MacroAssembler* masm = codegen->GetVIXLAssembler(); Location base_loc = locations->InAt(1); Register base = WRegisterFrom(base_loc); // Object pointer. Location offset_loc = locations->InAt(2); @@ -916,7 +915,7 @@ static void GenUnsafePut(LocationSummary* locations, bool is_volatile, bool is_ordered, CodeGeneratorARM64* codegen) { - MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_; + MacroAssembler* masm = codegen->GetVIXLAssembler(); Register base = WRegisterFrom(locations->InAt(1)); // Object pointer. Register offset = XRegisterFrom(locations->InAt(2)); // Long offset. 
@@ -1035,7 +1034,7 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, } static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) { - MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_; + MacroAssembler* masm = codegen->GetVIXLAssembler(); Register out = WRegisterFrom(locations->Out()); // Boolean result.