-rw-r--r--  compiler/optimizing/code_generator_arm64.h       6
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc         11
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc         24
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h          17
-rw-r--r--  compiler/utils/arm64/constants_arm64.h          37
-rw-r--r--  compiler/utils/arm64/managed_register_arm64.h    2
6 files changed, 27 insertions, 70 deletions
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 240936c176..1b5fa857e7 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -243,7 +243,7 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
}
Arm64Assembler* GetAssembler() const { return assembler_; }
- vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
private:
void GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
@@ -364,7 +364,7 @@ class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap {
private:
Arm64Assembler* GetAssembler() const;
vixl::aarch64::MacroAssembler* GetVIXLAssembler() const {
- return GetAssembler()->vixl_masm_;
+ return GetAssembler()->GetVIXLAssembler();
}
CodeGeneratorARM64* const codegen_;
@@ -413,7 +413,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
const Arm64Assembler& GetAssembler() const OVERRIDE { return assembler_; }
- vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->vixl_masm_; }
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return GetAssembler()->GetVIXLAssembler(); }
// Emit a write barrier.
void MarkGCCard(vixl::aarch64::Register object,
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 06d1148652..e3a9d27a53 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -26,7 +26,6 @@
#include "mirror/string.h"
#include "thread.h"
#include "utils/arm64/assembler_arm64.h"
-#include "utils/arm64/constants_arm64.h"
using namespace vixl::aarch64; // NOLINT(build/namespaces)
@@ -62,14 +61,14 @@ ALWAYS_INLINE inline MemOperand AbsoluteHeapOperandFrom(Location location, size_
} // namespace
MacroAssembler* IntrinsicCodeGeneratorARM64::GetVIXLAssembler() {
- return codegen_->GetAssembler()->vixl_masm_;
+ return codegen_->GetVIXLAssembler();
}
ArenaAllocator* IntrinsicCodeGeneratorARM64::GetAllocator() {
return codegen_->GetGraph()->GetArena();
}
-#define __ codegen->GetAssembler()->vixl_masm_->
+#define __ codegen->GetVIXLAssembler()->
static void MoveFromReturnRegister(Location trg,
Primitive::Type type,
@@ -782,7 +781,7 @@ static void GenUnsafeGet(HInvoke* invoke,
DCHECK((type == Primitive::kPrimInt) ||
(type == Primitive::kPrimLong) ||
(type == Primitive::kPrimNot));
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Location base_loc = locations->InAt(1);
Register base = WRegisterFrom(base_loc); // Object pointer.
Location offset_loc = locations->InAt(2);
@@ -916,7 +915,7 @@ static void GenUnsafePut(LocationSummary* locations,
bool is_volatile,
bool is_ordered,
CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
@@ -1035,7 +1034,7 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena,
}
static void GenCas(LocationSummary* locations, Primitive::Type type, CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetAssembler()->vixl_masm_;
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
Register out = WRegisterFrom(locations->Out()); // Boolean result.
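Every call site touched in this file follows the same pattern: instead of reaching into the assembler's public vixl_masm_ field, code asks the code generator for the VIXL macro assembler via GetVIXLAssembler(). The sketch below illustrates that indirection; DemoAssembler and DemoCodeGenerator are illustrative stand-ins for Arm64Assembler and CodeGeneratorARM64, not the real ART classes, and the VIXL include path may differ between checkouts.

#include "aarch64/macro-assembler-aarch64.h"  // VIXL header (path may vary by checkout)

// Illustrative stand-in for Arm64Assembler: owns the VIXL assembler by value
// and hands out its address through an accessor.
class DemoAssembler {
 public:
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
 private:
  vixl::aarch64::MacroAssembler vixl_masm_;
};

// Illustrative stand-in for CodeGeneratorARM64: forwards to the assembler's
// accessor so clients never touch the member directly.
class DemoCodeGenerator {
 public:
  DemoAssembler* GetAssembler() { return &assembler_; }
  vixl::aarch64::MacroAssembler* GetVIXLAssembler() {
    return GetAssembler()->GetVIXLAssembler();
  }
 private:
  DemoAssembler assembler_;
};

// Caller-side pattern, mirroring GenUnsafeGet/GenUnsafePut/GenCas above.
void EmitExample(DemoCodeGenerator* codegen) {
  vixl::aarch64::MacroAssembler* masm = codegen->GetVIXLAssembler();
  masm->Mov(vixl::aarch64::x0, 42);  // any VIXL emission goes through masm
}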
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index d82caf57e3..dc1f24a152 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -28,7 +28,7 @@ namespace arm64 {
#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
-#define ___ vixl_masm_->
+#define ___ vixl_masm_.
#endif
void Arm64Assembler::FinalizeCode() {
@@ -39,16 +39,16 @@ void Arm64Assembler::FinalizeCode() {
}
size_t Arm64Assembler::CodeSize() const {
- return vixl_masm_->GetBufferCapacity() - vixl_masm_->GetRemainingBufferSpace();
+ return vixl_masm_.GetBufferCapacity() - vixl_masm_.GetRemainingBufferSpace();
}
const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
- return vixl_masm_->GetStartAddress<uint8_t*>();
+ return vixl_masm_.GetStartAddress<uint8_t*>();
}
void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
// Copy the instructions from the buffer.
- MemoryRegion from(vixl_masm_->GetStartAddress<void*>(), CodeSize());
+ MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
region.CopyFrom(0, from);
}
@@ -86,7 +86,7 @@ void Arm64Assembler::AddConstant(XRegister rd, XRegister rn, int32_t value,
} else {
// temp = rd + value
// rd = cond ? temp : rn
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(rd), reg_x(rn));
Register temp = temps.AcquireX();
___ Add(temp, reg_x(rn), value);
@@ -183,7 +183,7 @@ void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset64 tr_offs,
}
void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) {
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
Register temp = temps.AcquireX();
___ Mov(temp, reg_x(SP));
___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
@@ -207,7 +207,7 @@ void Arm64Assembler::LoadImmediate(XRegister dest, int32_t value,
// temp = value
// rd = cond ? temp : rd
if (value != 0) {
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(dest));
Register temp = temps.AcquireX();
___ Mov(temp, value);
@@ -314,7 +314,7 @@ void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, O
Arm64ManagedRegister base = m_base.AsArm64();
CHECK(dst.IsXRegister() && base.IsXRegister());
// Remove dst and base form the temp list - higher level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
@@ -528,7 +528,7 @@ void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister
CHECK(base.IsXRegister()) << base;
CHECK(scratch.IsXRegister()) << scratch;
// Remove base and scratch form the temp list - higher level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
___ Br(reg_x(scratch.AsXRegister()));
@@ -621,7 +621,7 @@ void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjus
}
void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
Register temp = temps.AcquireX();
@@ -653,7 +653,7 @@ static inline dwarf::Reg DWARFReg(CPURegister reg) {
void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Since we are operating on register pairs, we would like to align on
// double the standard size; on the other hand, we don't want to insert
// an extra store, which will happen if the number of registers is even.
@@ -681,7 +681,7 @@ void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Be consistent with the logic for spilling registers.
if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
const CPURegister& dst0 = registers.PopLowestIndex();
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 24b798201a..b8434b9263 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -23,7 +23,6 @@
#include "base/arena_containers.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "offsets.h"
@@ -84,16 +83,13 @@ class Arm64Exception {
class Arm64Assembler FINAL : public Assembler {
public:
- // We indicate the size of the initial code generation buffer to the VIXL
- // assembler. From there we it will automatically manage the buffer.
explicit Arm64Assembler(ArenaAllocator* arena)
: Assembler(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)),
- vixl_masm_(new vixl::aarch64::MacroAssembler(kArm64BaseBufferSize)) {}
+ exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
- virtual ~Arm64Assembler() {
- delete vixl_masm_;
- }
+ virtual ~Arm64Assembler() {}
+
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
// Finalize the code.
void FinalizeCode() OVERRIDE;
@@ -287,9 +283,8 @@ class Arm64Assembler FINAL : public Assembler {
// List of exception blocks to generate at the end of the code cache.
ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
- public:
- // Vixl assembler.
- vixl::aarch64::MacroAssembler* const vixl_masm_;
+ // VIXL assembler.
+ vixl::aarch64::MacroAssembler vixl_masm_;
// Used for testing.
friend class Arm64ManagedRegister_VixlRegisters_Test;
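The header change above is the core of the patch: the MacroAssembler stops being a heap-allocated pointer member (new'd in the constructor, deleted in the destructor) and becomes a plain by-value member exposed through GetVIXLAssembler(). The sketch below contrasts the two ownership styles and shows why the .cc changes swap vixl_masm_-> for vixl_masm_. and pass &vixl_masm_ to UseScratchRegisterScope; it is a simplified illustration under those assumptions, not the actual ART class, and the VIXL include path may vary.

#include "aarch64/macro-assembler-aarch64.h"  // VIXL header (path may vary by checkout)

using vixl::aarch64::MacroAssembler;
using vixl::aarch64::Register;
using vixl::aarch64::UseScratchRegisterScope;

// Old style: pointer member with explicit new/delete.
class PointerOwner {
 public:
  PointerOwner() : vixl_masm_(new MacroAssembler()) {}
  ~PointerOwner() { delete vixl_masm_; }
  void Emit() {
    UseScratchRegisterScope temps(vixl_masm_);  // already a pointer
    Register temp = temps.AcquireX();
    vixl_masm_->Mov(temp, 0);
  }
 private:
  MacroAssembler* const vixl_masm_;
};

// New style: by-value member; no manual lifetime management, and callers that
// need a pointer take the member's address.
class ValueOwner {
 public:
  MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
  void Emit() {
    UseScratchRegisterScope temps(&vixl_masm_);  // address of the member
    Register temp = temps.AcquireX();
    vixl_masm_.Mov(temp, 0);
  }
 private:
  MacroAssembler vixl_masm_;
};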
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
deleted file mode 100644
index 01e8be9de6..0000000000
--- a/compiler/utils/arm64/constants_arm64.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-#define ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-
-#include <stdint.h>
-#include <iosfwd>
-#include "arch/arm64/registers_arm64.h"
-#include "base/casts.h"
-#include "base/logging.h"
-#include "globals.h"
-
-// TODO: Extend this file by adding missing functionality.
-
-namespace art {
-namespace arm64 {
-
-constexpr size_t kArm64BaseBufferSize = 4096;
-
-} // namespace arm64
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index f7d74d2af4..7378a0a081 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
#define ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
+#include "arch/arm64/registers_arm64.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "debug/dwarf/register.h"
#include "utils/managed_register.h"