Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk                                     1
-rw-r--r--  compiler/driver/compiler_options.cc                    27
-rw-r--r--  compiler/driver/compiler_options.h                     24
-rw-r--r--  compiler/jit/jit_compiler.cc                            5
-rw-r--r--  compiler/optimizing/code_generator_arm.cc              24
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc            31
-rw-r--r--  compiler/optimizing/dead_code_elimination.h             6
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_arm.h        4
-rw-r--r--  compiler/optimizing/dex_cache_array_fixups_mips.h       4
-rw-r--r--  compiler/optimizing/induction_var_analysis.h            2
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm.h        4
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.h      5
-rw-r--r--  compiler/optimizing/instruction_simplifier_shared.cc   16
-rw-r--r--  compiler/optimizing/optimization.h                      5
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc            188
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.h           2
-rw-r--r--  compiler/optimizing/pc_relative_fixups_x86.h            4
-rw-r--r--  compiler/optimizing/x86_memory_gen.cc                   4
-rw-r--r--  compiler/optimizing/x86_memory_gen.h                    4
-rw-r--r--  compiler/utils/jni_macro_assembler.cc                   4
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc             537
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h              129
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc        123
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.cc   603
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.h    190
25 files changed, 1145 insertions(+), 801 deletions(-)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 61ebf1d618..16a158cf6f 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -153,6 +153,7 @@ LIBART_COMPILER_SRC_FILES_x86_64 := \
optimizing/intrinsics_x86_64.cc \
optimizing/code_generator_x86_64.cc \
utils/x86_64/assembler_x86_64.cc \
+ utils/x86_64/jni_macro_assembler_x86_64.cc \
utils/x86_64/managed_register_x86_64.cc \
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index f20dba34a6..30ba8c9e74 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -44,7 +44,9 @@ CompilerOptions::CompilerOptions()
init_failure_output_(nullptr),
dump_cfg_file_name_(""),
dump_cfg_append_(false),
- force_determinism_(false) {
+ force_determinism_(false),
+ register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault),
+ passes_to_run_(nullptr) {
}
CompilerOptions::~CompilerOptions() {
@@ -74,7 +76,9 @@ CompilerOptions::CompilerOptions(CompilerFilter::Filter compiler_filter,
bool abort_on_hard_verifier_failure,
const std::string& dump_cfg_file_name,
bool dump_cfg_append,
- bool force_determinism
+ bool force_determinism,
+ RegisterAllocator::Strategy regalloc_strategy,
+ const std::vector<std::string>* passes_to_run
) : // NOLINT(whitespace/parens)
compiler_filter_(compiler_filter),
huge_method_threshold_(huge_method_threshold),
@@ -99,7 +103,9 @@ CompilerOptions::CompilerOptions(CompilerFilter::Filter compiler_filter,
init_failure_output_(init_failure_output),
dump_cfg_file_name_(dump_cfg_file_name),
dump_cfg_append_(dump_cfg_append),
- force_determinism_(force_determinism) {
+ force_determinism_(force_determinism),
+ register_allocation_strategy_(regalloc_strategy),
+ passes_to_run_(passes_to_run) {
}
void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) {
@@ -144,6 +150,19 @@ void CompilerOptions::ParseDumpInitFailures(const StringPiece& option,
}
}
+void CompilerOptions::ParseRegisterAllocationStrategy(const StringPiece& option,
+ UsageFn Usage) {
+ DCHECK(option.starts_with("--register-allocation-strategy="));
+ StringPiece choice = option.substr(strlen("--register-allocation-strategy=")).data();
+ if (choice == "linear-scan") {
+ register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorLinearScan;
+ } else if (choice == "graph-color") {
+ register_allocation_strategy_ = RegisterAllocator::Strategy::kRegisterAllocatorGraphColor;
+ } else {
+ Usage("Unrecognized register allocation strategy. Try linear-scan, or graph-color.");
+ }
+}
+
bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usage) {
if (option.starts_with("--compiler-filter=")) {
const char* compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
@@ -190,6 +209,8 @@ bool CompilerOptions::ParseCompilerOption(const StringPiece& option, UsageFn Usa
dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
} else if (option.starts_with("--dump-cfg-append")) {
dump_cfg_append_ = true;
+ } else if (option.starts_with("--register-allocation-strategy=")) {
+ ParseRegisterAllocationStrategy(option, Usage);
} else {
// Option not recognized.
return false;
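
The new flag goes through the existing ParseCompilerOption() dispatch, so it can be supplied wherever compiler options are accepted (dex2oat's command line, or the runtime compiler-options list the JIT forwards below). A minimal sketch of exercising it, assuming UsageFn is ART's usual varargs function pointer and eliding real error reporting:

    static void AbortUsage(const char* /*fmt*/, ...) { abort(); }

    void Example() {
      CompilerOptions options;
      // Recognized values set the field; anything else funnels into Usage().
      options.ParseCompilerOption(StringPiece("--register-allocation-strategy=graph-color"),
                                  AbortUsage);
      CHECK_EQ(options.GetRegisterAllocationStrategy(),
               RegisterAllocator::kRegisterAllocatorGraphColor);
    }
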
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 60b700ad91..abc58d7dda 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -24,6 +24,7 @@
#include "base/macros.h"
#include "compiler_filter.h"
#include "globals.h"
+#include "optimizing/register_allocator.h"
#include "utils.h"
namespace art {
@@ -74,7 +75,9 @@ class CompilerOptions FINAL {
bool abort_on_hard_verifier_failure,
const std::string& dump_cfg_file_name,
bool dump_cfg_append,
- bool force_determinism);
+ bool force_determinism,
+ RegisterAllocator::Strategy regalloc_strategy,
+ const std::vector<std::string>* passes_to_run);
CompilerFilter::Filter GetCompilerFilter() const {
return compiler_filter_;
@@ -244,6 +247,14 @@ class CompilerOptions FINAL {
return force_determinism_;
}
+ RegisterAllocator::Strategy GetRegisterAllocationStrategy() const {
+ return register_allocation_strategy_;
+ }
+
+ const std::vector<std::string>* GetPassesToRun() const {
+ return passes_to_run_;
+ }
+
private:
void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
@@ -254,6 +265,7 @@ class CompilerOptions FINAL {
void ParseSmallMethodMax(const StringPiece& option, UsageFn Usage);
void ParseLargeMethodMax(const StringPiece& option, UsageFn Usage);
void ParseHugeMethodMax(const StringPiece& option, UsageFn Usage);
+ void ParseRegisterAllocationStrategy(const StringPiece& option, UsageFn Usage);
CompilerFilter::Filter compiler_filter_;
size_t huge_method_threshold_;
@@ -297,6 +309,16 @@ class CompilerOptions FINAL {
// outcomes.
bool force_determinism_;
+ RegisterAllocator::Strategy register_allocation_strategy_;
+
+ // If not null, specifies optimization passes which will be run instead of the defaults.
+ // Note that passes_to_run_ is not checked for correctness: passes depend on one another,
+ // and an incorrect list that fails to satisfy those dependencies can lead to unexpected
+ // compiler behavior, including crashes. Passing pass names which are not recognized by
+ // the compiler results in compiler-dependent behavior.
+ const std::vector<std::string>* passes_to_run_;
+
friend class Dex2Oat;
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
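
Since passes_to_run_ is taken at face value, any custom list must order analyses before their consumers; BuildOptimizations() in optimizing_compiler.cc (further down) binds each consumer to the nearest preceding analysis and CHECK-fails otherwise. A sketch of a list that satisfies those dependencies; pass-name strings other than the ones visible in this change ("dead_code_elimination", "induction_var_analysis", ...) are assumptions:

    std::vector<std::string> passes = {
        "instruction_simplifier",
        "side_effects",                 // analysis: must precede GVN/LICM/BCE
        "GVN",
        "induction_var_analysis",       // analysis: must precede BCE
        "BCE",
        "dead_code_elimination$final",  // '$' suffix names this instance; see optimization.h
    };
    // Handed to the CompilerOptions constructor (or set by the Dex2Oat friend class);
    // the vector must outlive compilation, since only the pointer is stored.
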
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 178533849b..6f6a8f5a3b 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -32,6 +32,7 @@
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "object_lock.h"
+#include "optimizing/register_allocator.h"
#include "thread_list.h"
namespace art {
@@ -110,7 +111,9 @@ JitCompiler::JitCompiler() {
/* abort_on_hard_verifier_failure */ false,
/* dump_cfg_file_name */ "",
/* dump_cfg_append */ false,
- /* force_determinism */ false));
+ /* force_determinism */ false,
+ RegisterAllocator::kRegisterAllocatorDefault,
+ /* passes_to_run */ nullptr));
for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
compiler_options_->ParseCompilerOption(argument, Usage);
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index c105940f28..5eaf11e9fb 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -432,6 +432,11 @@ class ReadBarrierMarkSlowPathARM : public SlowPathCode {
(instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
+ DCHECK(!(instruction_->IsArrayGet() &&
+ instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
__ Bind(GetEntryLabel());
// No need to save live registers; it's taken care of by the
@@ -512,6 +517,11 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
(instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
+ DCHECK(!(instruction_->IsArrayGet() &&
+ instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
@@ -4467,8 +4477,6 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
Primitive::Type type = instruction->GetType();
HInstruction* array_instr = instruction->GetArray();
bool has_intermediate_address = array_instr->IsIntermediateAddress();
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
switch (type) {
case Primitive::kPrimBoolean:
@@ -4503,6 +4511,11 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
}
case Primitive::kPrimNot: {
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
+ DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
+
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
@@ -4645,8 +4658,6 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
Location value_loc = locations->InAt(2);
HInstruction* array_instr = instruction->GetArray();
bool has_intermediate_address = array_instr->IsIntermediateAddress();
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
switch (value_type) {
case Primitive::kPrimBoolean:
@@ -4911,8 +4922,6 @@ void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
}
void LocationsBuilderARM::VisitIntermediateAddress(HIntermediateAddress* instruction) {
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -4927,9 +4936,6 @@ void InstructionCodeGeneratorARM::VisitIntermediateAddress(HIntermediateAddress*
Location first = locations->InAt(0);
Location second = locations->InAt(1);
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
-
if (second.IsRegister()) {
__ add(out.AsRegister<Register>(),
first.AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 54c9efcafe..9ceb3109cd 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -598,6 +598,11 @@ class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
(instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
<< "Unexpected instruction in read barrier marking slow path: "
<< instruction_->DebugName();
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
+ DCHECK(!(instruction_->IsArrayGet() &&
+ instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
__ Bind(GetEntryLabel());
// No need to save live registers; it's taken care of by the
@@ -680,7 +685,9 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
(instruction_->IsInvokeVirtual()) && instruction_->GetLocations()->Intrinsified())
<< "Unexpected instruction in read barrier for heap reference slow path: "
<< instruction_->DebugName();
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
+ // The read barrier instrumentation of object ArrayGet
+ // instructions does not support the HIntermediateAddress
+ // instruction.
DCHECK(!(instruction_->IsArrayGet() &&
instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
@@ -1983,8 +1990,6 @@ void InstructionCodeGeneratorARM64::VisitArm64DataProcWithShifterOp(
}
void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) {
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
@@ -1992,10 +1997,7 @@ void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instr
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM64::VisitIntermediateAddress(
- HIntermediateAddress* instruction) {
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
+void InstructionCodeGeneratorARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) {
__ Add(OutputRegister(instruction),
InputRegisterAt(instruction, 0),
Operand(InputOperandAt(instruction, 1)));
@@ -2091,11 +2093,15 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
// Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
BlockPoolsScope block_pools(masm);
+ // The read barrier instrumentation of object ArrayGet instructions
+ // does not support the HIntermediateAddress instruction.
+ DCHECK(!((type == Primitive::kPrimNot) &&
+ instruction->GetArray()->IsIntermediateAddress() &&
+ kEmitCompilerReadBarrier));
+
if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// Object ArrayGet with Baker's read barrier case.
Register temp = temps.AcquireW();
- // The read barrier instrumentation does not support the HIntermediateAddress instruction yet.
- DCHECK(!instruction->GetArray()->IsIntermediateAddress());
// Note that a potential implicit null check is handled in the
// CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
@@ -2109,9 +2115,6 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
} else {
Register temp = temps.AcquireSameSizeAs(obj);
if (instruction->GetArray()->IsIntermediateAddress()) {
- // The read barrier instrumentation does not support the
- // HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
// We do not need to compute the intermediate address from the array: the
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
@@ -2201,9 +2204,6 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireSameSizeAs(array);
if (instruction->GetArray()->IsIntermediateAddress()) {
- // The read barrier instrumentation does not support the
- // HIntermediateAddress instruction yet.
- DCHECK(!kEmitCompilerReadBarrier);
// We do not need to compute the intermediate address from the array: the
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
@@ -2223,7 +2223,6 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
codegen_->Store(value_type, value, destination);
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
- DCHECK(needs_write_barrier);
DCHECK(!instruction->GetArray()->IsIntermediateAddress());
vixl::aarch64::Label done;
SlowPathCodeARM64* slow_path = nullptr;
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 0ce0ec1402..58e700deba 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -31,13 +31,11 @@ class HDeadCodeElimination : public HOptimization {
public:
HDeadCodeElimination(HGraph* graph,
OptimizingCompilerStats* stats = nullptr,
- const char* name = kInitialDeadCodeEliminationPassName)
+ const char* name = kDeadCodeEliminationPassName)
: HOptimization(graph, name, stats) {}
void Run() OVERRIDE;
-
- static constexpr const char* kInitialDeadCodeEliminationPassName = "dead_code_elimination";
- static constexpr const char* kFinalDeadCodeEliminationPassName = "dead_code_elimination_final";
+ static constexpr const char* kDeadCodeEliminationPassName = "dead_code_elimination";
private:
void MaybeRecordDeadBlock(HBasicBlock* block);
diff --git a/compiler/optimizing/dex_cache_array_fixups_arm.h b/compiler/optimizing/dex_cache_array_fixups_arm.h
index 015f910328..9142e29eff 100644
--- a/compiler/optimizing/dex_cache_array_fixups_arm.h
+++ b/compiler/optimizing/dex_cache_array_fixups_arm.h
@@ -26,7 +26,9 @@ namespace arm {
class DexCacheArrayFixups : public HOptimization {
public:
DexCacheArrayFixups(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "dex_cache_array_fixups_arm", stats) {}
+ : HOptimization(graph, kDexCacheArrayFixupsArmPassName, stats) {}
+
+ static constexpr const char* kDexCacheArrayFixupsArmPassName = "dex_cache_array_fixups_arm";
void Run() OVERRIDE;
};
diff --git a/compiler/optimizing/dex_cache_array_fixups_mips.h b/compiler/optimizing/dex_cache_array_fixups_mips.h
index 21056e130a..861a199d6c 100644
--- a/compiler/optimizing/dex_cache_array_fixups_mips.h
+++ b/compiler/optimizing/dex_cache_array_fixups_mips.h
@@ -29,9 +29,11 @@ namespace mips {
class DexCacheArrayFixups : public HOptimization {
public:
DexCacheArrayFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "dex_cache_array_fixups_mips", stats),
+ : HOptimization(graph, kDexCacheArrayFixupsMipsPassName, stats),
codegen_(codegen) {}
+ static constexpr const char* kDexCacheArrayFixupsMipsPassName = "dex_cache_array_fixups_mips";
+
void Run() OVERRIDE;
private:
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 7c74816c26..cd4c830645 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -39,9 +39,9 @@ class HInductionVarAnalysis : public HOptimization {
void Run() OVERRIDE;
- private:
static constexpr const char* kInductionPassName = "induction_var_analysis";
+ private:
struct NodeInfo {
explicit NodeInfo(uint32_t d) : depth(d), done(false) {}
uint32_t depth;
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index 3d297dacc0..782110c40a 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -48,7 +48,9 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
class InstructionSimplifierArm : public HOptimization {
public:
InstructionSimplifierArm(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "instruction_simplifier_arm", stats) {}
+ : HOptimization(graph, kInstructionSimplifierArmPassName, stats) {}
+
+ static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
void Run() OVERRIDE {
InstructionSimplifierArmVisitor visitor(graph_, stats_);
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 28648b3bea..f71684efe9 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -82,8 +82,9 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
class InstructionSimplifierArm64 : public HOptimization {
public:
InstructionSimplifierArm64(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, "instruction_simplifier_arm64", stats) {}
-
+ : HOptimization(graph, kInstructionSimplifierArm64PassName, stats) {}
+ static constexpr const char* kInstructionSimplifierArm64PassName
+ = "instruction_simplifier_arm64";
void Run() OVERRIDE {
InstructionSimplifierArm64Visitor visitor(graph_, stats_);
visitor.VisitReversePostOrder();
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 8f7778fe68..6632cd9969 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -231,15 +231,6 @@ bool TryExtractArrayAccessAddress(HInstruction* access,
HInstruction* array,
HInstruction* index,
size_t data_offset) {
- if (kEmitCompilerReadBarrier) {
- // The read barrier instrumentation does not support the
- // HIntermediateAddress instruction yet.
- //
- // TODO: Handle this case properly in the ARM64 and ARM code generator and
- // re-enable this optimization; otherwise, remove this TODO.
- // b/26601270
- return false;
- }
if (index->IsConstant() ||
(index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
// When the index is a constant all the addressing can be fitted in the
@@ -251,6 +242,13 @@ bool TryExtractArrayAccessAddress(HInstruction* access,
// The access may require a runtime call or the original array pointer.
return false;
}
+ if (kEmitCompilerReadBarrier &&
+ access->IsArrayGet() &&
+ access->AsArrayGet()->GetType() == Primitive::kPrimNot) {
+ // For object arrays, the read barrier instrumentation requires
+ // the original array pointer.
+ return false;
+ }
// Proceed to extract the base address computation.
HGraph* graph = access->GetBlock()->GetGraph();
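
Net effect of reordering these checks: extracting the base address (rewriting an array access so that `array + data_offset` is computed once in an HIntermediateAddress and reused) is now permitted under read barriers, except for object-typed ArrayGets, which must keep the original array reference for the barrier. A compilable restatement of the guard, with stand-ins for ART's Primitive::Type and build flag:

    enum class Type { kPrimNot, kPrimInt /* ... */ };

    bool CanExtractArrayAccessAddress(bool index_is_constant,
                                      bool is_object_array_get,
                                      bool emit_read_barrier) {
      if (index_is_constant) {
        return false;  // A constant index already folds into the addressing mode.
      }
      if (emit_read_barrier && is_object_array_get) {
        return false;  // The read barrier needs the original array pointer.
      }
      return true;  // Safe to hoist `array + data_offset` into an HIntermediateAddress.
    }
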
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 2f59d4cd5b..0819fb01ac 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -37,7 +37,10 @@ class HOptimization : public ArenaObject<kArenaAllocOptimization> {
virtual ~HOptimization() {}
- // Return the name of the pass.
+ // Return the name of the pass. Pass names for a single HOptimization should be of the form
+ // <optimization_name> or <optimization_name>$<pass_name>, where <optimization_name> is a
+ // prefix common to all instances of the optimization.
+ // Examples: 'instruction_simplifier', 'instruction_simplifier$after_bce',
+ // 'instruction_simplifier$before_codegen'.
const char* GetPassName() const { return pass_name_; }
// Perform the analysis itself.
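
The `$` separator is what lets a suffixed instance name resolve back to its base optimization: BuildOptimization() in optimizing_compiler.cc strips everything from the first '$' before matching against the kPassName constants. The resolution step in isolation:

    #include <string>

    std::string BasePassName(const std::string& pass_name) {
      size_t pos = pass_name.find('$');  // kPassNameSeparator
      return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
    }
    // BasePassName("instruction_simplifier$after_bce") == "instruction_simplifier"
    // BasePassName("GVN$after_arch")                   == "GVN"
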
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d5b0d77fe5..f7c82d1987 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -95,6 +95,8 @@ namespace art {
static constexpr size_t kArenaAllocatorMemoryReportThreshold = 8 * MB;
+static constexpr const char* kPassNameSeparator = "$";
+
/**
* Used by the code generator, to allocate the code in a vector.
*/
@@ -266,7 +268,7 @@ class PassScope : public ValueObject {
class OptimizingCompiler FINAL : public Compiler {
public:
explicit OptimizingCompiler(CompilerDriver* driver);
- ~OptimizingCompiler();
+ ~OptimizingCompiler() OVERRIDE;
bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const OVERRIDE;
@@ -305,17 +307,17 @@ class OptimizingCompiler FINAL : public Compiler {
OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
- protected:
- virtual void RunOptimizations(HGraph* graph,
- CodeGenerator* codegen,
- CompilerDriver* driver,
- const DexCompilationUnit& dex_compilation_unit,
- PassObserver* pass_observer,
- StackHandleScopeCollection* handles) const;
+ private:
+ void RunOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ StackHandleScopeCollection* handles) const;
- virtual void RunOptimizations(HOptimization* optimizations[],
- size_t length,
- PassObserver* pass_observer) const;
+ void RunOptimizations(HOptimization* optimizations[],
+ size_t length,
+ PassObserver* pass_observer) const;
private:
// Create a 'CompiledMethod' for an optimized graph.
@@ -420,6 +422,117 @@ static bool InstructionSetSupportsReadBarrier(InstructionSet instruction_set) {
|| instruction_set == kX86_64;
}
+static HOptimization* BuildOptimization(
+ const std::string& opt_name,
+ ArenaAllocator* arena,
+ HGraph* graph,
+ OptimizingCompilerStats* stats,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ StackHandleScopeCollection* handles,
+ SideEffectsAnalysis* most_recent_side_effects,
+ HInductionVarAnalysis* most_recent_induction) {
+ if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
+ return new (arena) arm::InstructionSimplifierArm(graph, stats);
+ } else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) {
+ return new (arena) arm64::InstructionSimplifierArm64(graph, stats);
+ } else if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
+ CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
+ return new (arena) BoundsCheckElimination(graph,
+ *most_recent_side_effects,
+ most_recent_induction);
+ } else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
+ CHECK(most_recent_side_effects != nullptr);
+ return new (arena) GVNOptimization(graph, *most_recent_side_effects);
+ } else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
+ return new (arena) HConstantFolding(graph);
+ } else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
+ return new (arena) HDeadCodeElimination(graph, stats);
+ } else if (opt_name == HInliner::kInlinerPassName) {
+ size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
+ return new (arena) HInliner(graph, // outer_graph
+ graph, // outermost_graph
+ codegen,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
+ driver,
+ handles,
+ stats,
+ number_of_dex_registers,
+ /* depth */ 0);
+ } else if (opt_name == HSharpening::kSharpeningPassName) {
+ return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
+ } else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
+ return new (arena) HSelectGenerator(graph, stats);
+ } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
+ return new (arena) HInductionVarAnalysis(graph);
+ } else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
+ return new (arena) InstructionSimplifier(graph, stats);
+ } else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
+ return new (arena) IntrinsicsRecognizer(graph, driver, stats);
+ } else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
+ CHECK(most_recent_side_effects != nullptr);
+ return new (arena) LICM(graph, *most_recent_side_effects, stats);
+ } else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
+ CHECK(most_recent_side_effects != nullptr);
+ return new (arena) LoadStoreElimination(graph, *most_recent_side_effects);
+ } else if (opt_name == mips::DexCacheArrayFixups::kDexCacheArrayFixupsMipsPassName) {
+ return new (arena) mips::DexCacheArrayFixups(graph, codegen, stats);
+ } else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
+ return new (arena) mips::PcRelativeFixups(graph, codegen, stats);
+ } else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
+ return new (arena) SideEffectsAnalysis(graph);
+ } else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) {
+ return new (arena) x86::PcRelativeFixups(graph, codegen, stats);
+ } else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) {
+ return new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ }
+ return nullptr;
+}
+
+static ArenaVector<HOptimization*> BuildOptimizations(
+ const std::vector<std::string>& pass_names,
+ ArenaAllocator* arena,
+ HGraph* graph,
+ OptimizingCompilerStats* stats,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ StackHandleScopeCollection* handles) {
+ // A few HOptimization constructors require a SideEffectsAnalysis or HInductionVarAnalysis
+ // instance. This method assumes that each of them uses the nearest instance preceding it
+ // in the pass name list.
+ SideEffectsAnalysis* most_recent_side_effects = nullptr;
+ HInductionVarAnalysis* most_recent_induction = nullptr;
+ ArenaVector<HOptimization*> ret(arena->Adapter());
+ for (std::string pass_name : pass_names) {
+ size_t pos = pass_name.find(kPassNameSeparator); // Strip suffix to get base pass name.
+ std::string opt_name = pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
+
+ HOptimization* opt = BuildOptimization(
+ opt_name,
+ arena,
+ graph,
+ stats,
+ codegen,
+ driver,
+ dex_compilation_unit,
+ handles,
+ most_recent_side_effects,
+ most_recent_induction);
+ CHECK(opt != nullptr) << "Couldn't build optimization: \"" << pass_name << "\"";
+ ret.push_back(opt);
+
+ if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
+ most_recent_side_effects = down_cast<SideEffectsAnalysis*>(opt);
+ } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
+ most_recent_induction = down_cast<HInductionVarAnalysis*>(opt);
+ }
+ }
+ return ret;
+}
+
void OptimizingCompiler::RunOptimizations(HOptimization* optimizations[],
size_t length,
PassObserver* pass_observer) const {
@@ -444,11 +557,11 @@ void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
}
size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
HInliner* inliner = new (graph->GetArena()) HInliner(
- graph,
- graph,
+ graph, // outer_graph
+ graph, // outermost_graph
codegen,
- dex_compilation_unit,
- dex_compilation_unit,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
driver,
handles,
stats,
@@ -473,7 +586,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
arm::InstructionSimplifierArm* simplifier =
new (arena) arm::InstructionSimplifierArm(graph, stats);
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN_after_arch");
+ GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HOptimization* arm_optimizations[] = {
simplifier,
side_effects,
@@ -489,7 +602,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
arm64::InstructionSimplifierArm64* simplifier =
new (arena) arm64::InstructionSimplifierArm64(graph, stats);
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
- GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN_after_arch");
+ GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects, "GVN$after_arch");
HOptimization* arm64_optimizations[] = {
simplifier,
side_effects,
@@ -518,7 +631,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
x86::PcRelativeFixups* pc_relative_fixups =
new (arena) x86::PcRelativeFixups(graph, codegen, stats);
x86::X86MemoryOperandGeneration* memory_gen =
- new(arena) x86::X86MemoryOperandGeneration(graph, stats, codegen);
+ new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_optimizations[] = {
pc_relative_fixups,
memory_gen
@@ -530,7 +643,7 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
x86::X86MemoryOperandGeneration* memory_gen =
- new(arena) x86::X86MemoryOperandGeneration(graph, stats, codegen);
+ new (arena) x86::X86MemoryOperandGeneration(graph, codegen, stats);
HOptimization* x86_64_optimizations[] = {
memory_gen
};
@@ -546,7 +659,8 @@ void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
NO_INLINE // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
CodeGenerator* codegen,
- PassObserver* pass_observer) {
+ PassObserver* pass_observer,
+ RegisterAllocator::Strategy strategy) {
{
PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName,
pass_observer);
@@ -559,7 +673,7 @@ static void AllocateRegisters(HGraph* graph,
}
{
PassScope scope(RegisterAllocator::kRegisterAllocatorPassName, pass_observer);
- RegisterAllocator::Create(graph->GetArena(), codegen, liveness)->AllocateRegisters();
+ RegisterAllocator::Create(graph->GetArena(), codegen, liveness, strategy)->AllocateRegisters();
}
}
@@ -571,15 +685,30 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
StackHandleScopeCollection* handles) const {
OptimizingCompilerStats* stats = compilation_stats_.get();
ArenaAllocator* arena = graph->GetArena();
+ if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
+ ArenaVector<HOptimization*> optimizations = BuildOptimizations(
+ *driver->GetCompilerOptions().GetPassesToRun(),
+ arena,
+ graph,
+ stats,
+ codegen,
+ driver,
+ dex_compilation_unit,
+ handles);
+ RunOptimizations(&optimizations[0], optimizations.size(), pass_observer);
+ return;
+ }
+
HDeadCodeElimination* dce1 = new (arena) HDeadCodeElimination(
- graph, stats, HDeadCodeElimination::kInitialDeadCodeEliminationPassName);
+ graph, stats, "dead_code_elimination$initial");
HDeadCodeElimination* dce2 = new (arena) HDeadCodeElimination(
- graph, stats, HDeadCodeElimination::kFinalDeadCodeEliminationPassName);
+ graph, stats, "dead_code_elimination$final");
HConstantFolding* fold1 = new (arena) HConstantFolding(graph);
InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats);
HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats);
- HConstantFolding* fold2 = new (arena) HConstantFolding(graph, "constant_folding_after_inlining");
- HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding_after_bce");
+ HConstantFolding* fold2 = new (arena) HConstantFolding(
+ graph, "constant_folding$after_inlining");
+ HConstantFolding* fold3 = new (arena) HConstantFolding(graph, "constant_folding$after_bce");
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
LICM* licm = new (arena) LICM(graph, *side_effects, stats);
@@ -588,9 +717,9 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier_after_bce");
+ graph, stats, "instruction_simplifier$after_bce");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
- graph, stats, "instruction_simplifier_before_codegen");
+ graph, stats, "instruction_simplifier$before_codegen");
IntrinsicsRecognizer* intrinsics = new (arena) IntrinsicsRecognizer(graph, driver, stats);
HOptimization* optimizations1[] = {
@@ -626,7 +755,6 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, pass_observer);
- AllocateRegisters(graph, codegen, pass_observer);
}
static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
@@ -841,6 +969,10 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
&pass_observer,
&handles);
+ RegisterAllocator::Strategy regalloc_strategy =
+ compiler_options.GetRegisterAllocationStrategy();
+ AllocateRegisters(graph, codegen.get(), &pass_observer, regalloc_strategy);
+
codegen->Compile(code_allocator);
pass_observer.DumpDisassembly();
}
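
Register allocation now runs from TryCompile() with the configured strategy rather than unconditionally at the end of RunOptimizations(). A hypothetical sketch of the dispatch the new Create() parameter implies; the concrete allocator class names are assumptions, not part of this diff:

    RegisterAllocator* RegisterAllocator::Create(ArenaAllocator* arena,
                                                 CodeGenerator* codegen,
                                                 const SsaLivenessAnalysis& liveness,
                                                 Strategy strategy) {
      switch (strategy) {
        case kRegisterAllocatorLinearScan:
          return new (arena) RegisterAllocatorLinearScan(arena, codegen, liveness);
        case kRegisterAllocatorGraphColor:
          return new (arena) RegisterAllocatorGraphColor(arena, codegen, liveness);
      }
      LOG(FATAL) << "Invalid register allocation strategy";
      UNREACHABLE();
    }
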
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index 1e8b071bb3..5a7397bf9d 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -32,6 +32,8 @@ class PcRelativeFixups : public HOptimization {
: HOptimization(graph, "pc_relative_fixups_mips", stats),
codegen_(codegen) {}
+ static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
+
void Run() OVERRIDE;
private:
diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h
index 03de2fcece..72fa71ea94 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.h
+++ b/compiler/optimizing/pc_relative_fixups_x86.h
@@ -29,9 +29,11 @@ namespace x86 {
class PcRelativeFixups : public HOptimization {
public:
PcRelativeFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "pc_relative_fixups_x86", stats),
+ : HOptimization(graph, kPcRelativeFixupsX86PassName, stats),
codegen_(codegen) {}
+ static constexpr const char* kPcRelativeFixupsX86PassName = "pc_relative_fixups_x86";
+
void Run() OVERRIDE;
private:
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index 195159f61b..8aa315a7e3 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -69,8 +69,8 @@ class MemoryOperandVisitor : public HGraphVisitor {
};
X86MemoryOperandGeneration::X86MemoryOperandGeneration(HGraph* graph,
- OptimizingCompilerStats* stats,
- CodeGenerator* codegen)
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats)
: HOptimization(graph, kX86MemoryOperandGenerationPassName, stats),
do_implicit_null_checks_(codegen->GetCompilerOptions().GetImplicitNullChecks()) {
}
diff --git a/compiler/optimizing/x86_memory_gen.h b/compiler/optimizing/x86_memory_gen.h
index 7e886819bb..5f15d9f1e6 100644
--- a/compiler/optimizing/x86_memory_gen.h
+++ b/compiler/optimizing/x86_memory_gen.h
@@ -28,8 +28,8 @@ namespace x86 {
class X86MemoryOperandGeneration : public HOptimization {
public:
X86MemoryOperandGeneration(HGraph* graph,
- OptimizingCompilerStats* stats,
- CodeGenerator* codegen);
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats);
void Run() OVERRIDE;
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 6fdf09baa3..1acc90ca6f 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -36,7 +36,7 @@
#include "x86/jni_macro_assembler_x86.h"
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
-#include "x86_64/assembler_x86_64.h"
+#include "x86_64/jni_macro_assembler_x86_64.h"
#endif
#include "base/casts.h"
#include "globals.h"
@@ -98,7 +98,7 @@ MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64:
- return MacroAsm64UniquePtr(new (arena) x86_64::X86_64Assembler(arena));
+ return MacroAsm64UniquePtr(new (arena) x86_64::X86_64JNIMacroAssembler(arena));
#endif
default:
LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set;
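
This factory change is the visible half of the split below: X86_64Assembler drops the JNIMacroAssembler<PointerSize::k64> base and keeps pure instruction emission, while the JNI calling-convention helpers move into the new X86_64JNIMacroAssembler (see jni_macro_assembler_x86_64.{h,cc} in the diffstat). Roughly, and heavily abridged:

    class X86_64Assembler FINAL : public Assembler {
      // Instruction emission only: movq(), addq(), call(), cfi(), ...
    };

    class X86_64JNIMacroAssembler FINAL
        : public JNIMacroAssembler<PointerSize::k64> {  // wraps/delegates to an X86_64Assembler
      // JNI ABI logic moved out of X86_64Assembler:
      // BuildFrame(), RemoveFrame(), Store()/Load(), ExceptionPoll(), ...
    };
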
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index ce4ea1d8fd..ddc824425e 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2639,543 +2639,6 @@ void X86_64Assembler::EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const
}
}
-static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::X86_64Core(static_cast<int>(reg));
-}
-static dwarf::Reg DWARFReg(FloatRegister reg) {
- return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = 8;
-
-void X86_64Assembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> spill_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- DCHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet.
- cfi_.SetCurrentCFAOffset(8); // Return address on stack.
- CHECK_ALIGNED(frame_size, kStackAlignment);
- int gpr_count = 0;
- for (int i = spill_regs.size() - 1; i >= 0; --i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsCpuRegister()) {
- pushq(spill.AsCpuRegister());
- gpr_count++;
- cfi_.AdjustCFAOffset(kFramePointerSize);
- cfi_.RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
- }
- }
- // return address then method on stack.
- int64_t rest_of_frame = static_cast<int64_t>(frame_size)
- - (gpr_count * kFramePointerSize)
- - kFramePointerSize /*return address*/;
- subq(CpuRegister(RSP), Immediate(rest_of_frame));
- cfi_.AdjustCFAOffset(rest_of_frame);
-
- // spill xmms
- int64_t offset = rest_of_frame;
- for (int i = spill_regs.size() - 1; i >= 0; --i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsXmmRegister()) {
- offset -= sizeof(double);
- movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
- cfi_.RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
- }
- }
-
- static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
- "Unexpected frame pointer size.");
-
- movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
-
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ManagedRegisterSpill spill = entry_spills.at(i);
- if (spill.AsX86_64().IsCpuRegister()) {
- if (spill.getSize() == 8) {
- movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
- spill.AsX86_64().AsCpuRegister());
- } else {
- CHECK_EQ(spill.getSize(), 4);
- movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsCpuRegister());
- }
- } else {
- if (spill.getSize() == 8) {
- movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
- } else {
- CHECK_EQ(spill.getSize(), 4);
- movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
- }
- }
- }
-}
-
-void X86_64Assembler::RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> spill_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
- int gpr_count = 0;
- // unspill xmms
- int64_t offset = static_cast<int64_t>(frame_size) - (spill_regs.size() * kFramePointerSize) - 2 * kFramePointerSize;
- for (size_t i = 0; i < spill_regs.size(); ++i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsXmmRegister()) {
- offset += sizeof(double);
- movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
- cfi_.Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
- } else {
- gpr_count++;
- }
- }
- int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
- addq(CpuRegister(RSP), Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
- for (size_t i = 0; i < spill_regs.size(); ++i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsCpuRegister()) {
- popq(spill.AsCpuRegister());
- cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
- cfi_.Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
- }
- }
- ret();
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
-void X86_64Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
- cfi_.AdjustCFAOffset(adjust);
-}
-
-void X86_64Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addq(CpuRegister(RSP), Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
-}
-
-void X86_64Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
- X86_64ManagedRegister src = msrc.AsX86_64();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsCpuRegister()) {
- if (size == 4) {
- CHECK_EQ(4u, size);
- movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
- } else {
- CHECK_EQ(8u, size);
- movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
- }
- } else if (src.IsRegisterPair()) {
- CHECK_EQ(0u, size);
- movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
- movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
- src.AsRegisterPairHigh());
- } else if (src.IsX87Register()) {
- if (size == 4) {
- fstps(Address(CpuRegister(RSP), offs));
- } else {
- fstpl(Address(CpuRegister(RSP), offs));
- }
- } else {
- CHECK(src.IsXmmRegister());
- if (size == 4) {
- movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
- } else {
- movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
- }
- }
-}
-
-void X86_64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- X86_64ManagedRegister src = msrc.AsX86_64();
- CHECK(src.IsCpuRegister());
- movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- X86_64ManagedRegister src = msrc.AsX86_64();
- CHECK(src.IsCpuRegister());
- movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister) {
- movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq?
-}
-
-void X86_64Assembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
- gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
- gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
-}
-
-void X86_64Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
- FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
- UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
-}
-
-void X86_64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- if (size == 4) {
- CHECK_EQ(4u, size);
- movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
- } else {
- CHECK_EQ(8u, size);
- movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
- }
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(0u, size);
- movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
- movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- flds(Address(CpuRegister(RSP), src));
- } else {
- fldl(Address(CpuRegister(RSP), src));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
- } else {
- movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
- }
- }
-}
-
-void X86_64Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- gs()->flds(Address::Absolute(src, true));
- } else {
- gs()->fldl(Address::Absolute(src, true));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
- } else {
- gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
- }
- }
-}
-
-void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister());
- movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
-}
-
-void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
- if (unpoison_reference) {
- MaybeUnpoisonHeapReference(dest.AsCpuRegister());
- }
-}
-
-void X86_64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
-}
-
-void X86_64Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister());
- gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
-}
-
-void X86_64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
- X86_64ManagedRegister reg = mreg.AsX86_64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
- } else {
- movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86_64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- X86_64ManagedRegister reg = mreg.AsX86_64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
- } else {
- movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86_64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- X86_64ManagedRegister src = msrc.AsX86_64();
- if (!dest.Equals(src)) {
- if (dest.IsCpuRegister() && src.IsCpuRegister()) {
- movq(dest.AsCpuRegister(), src.AsCpuRegister());
- } else if (src.IsX87Register() && dest.IsXmmRegister()) {
- // Pass via stack and pop X87 register
- subl(CpuRegister(RSP), Immediate(16));
- if (size == 4) {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstps(Address(CpuRegister(RSP), 0));
- movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
- } else {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstpl(Address(CpuRegister(RSP), 0));
- movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
- }
- addq(CpuRegister(RSP), Immediate(16));
- } else {
- // TODO: x87, SSE
- UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
- }
- }
-}
-
-void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
- movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
- Store(fr_offs, scratch, 8);
-}
-
-void X86_64Assembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- Load(scratch, fr_offs, 8);
- gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch,
- size_t size) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- if (scratch.IsCpuRegister() && size == 8) {
- Load(scratch, src, 4);
- Store(dest, scratch, 4);
- Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
- Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
- } else {
- Load(scratch, src, size);
- Store(dest, scratch, size);
- }
-}
-
-void X86_64Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void X86_64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size) {
- CHECK(scratch.IsNoRegister());
- CHECK_EQ(size, 4u);
- pushq(Address(CpuRegister(RSP), src));
- popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- CHECK_EQ(size, 4u);
- movq(scratch, Address(CpuRegister(RSP), src_base));
- movq(scratch, Address(scratch, src_offset));
- movq(Address(CpuRegister(RSP), dest), scratch);
-}
-
-void X86_64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) {
- CHECK_EQ(size, 4u);
- CHECK(scratch.IsNoRegister());
- pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
- popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- CHECK_EQ(size, 4u);
- CHECK_EQ(dest.Int32Value(), src.Int32Value());
- movq(scratch, Address(CpuRegister(RSP), src));
- pushq(Address(scratch, src_offset));
- popq(Address(scratch, dest_offset));
-}
-
-void X86_64Assembler::MemoryBarrier(ManagedRegister) {
- mfence();
-}
-
-void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
- X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
- X86_64ManagedRegister in_reg = min_reg.AsX86_64();
- if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
- // Use out_reg as indicator of null.
- in_reg = out_reg;
- // TODO: movzwl
- movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- }
- CHECK(in_reg.IsCpuRegister());
- CHECK(out_reg.IsCpuRegister());
- VerifyObject(in_reg, null_allowed);
- if (null_allowed) {
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- Bind(&null_arg);
- } else {
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- }
-}
-
-void X86_64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- if (null_allowed) {
- Label null_arg;
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
- j(kZero, &null_arg);
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- Bind(&null_arg);
- } else {
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- }
- Store(out_off, scratch, 8);
-}
-
-// Given a handle scope entry, load the associated reference.
-void X86_64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
- X86_64ManagedRegister in_reg = min_reg.AsX86_64();
- CHECK(out_reg.IsCpuRegister());
- CHECK(in_reg.IsCpuRegister());
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
- Bind(&null_arg);
-}
-
-void X86_64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86_64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
- X86_64ManagedRegister base = mbase.AsX86_64();
- CHECK(base.IsCpuRegister());
- call(Address(base.AsCpuRegister(), offset.Int32Value()));
- // TODO: place reference map on call
-}
-
-void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
- CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- movq(scratch, Address(CpuRegister(RSP), base));
- call(Address(scratch, offset));
-}
-
-void X86_64Assembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
- gs()->call(Address::Absolute(offset, true));
-}
-
-void X86_64Assembler::GetCurrentThread(ManagedRegister tr) {
- gs()->movq(tr.AsX86_64().AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
-}
-
-void X86_64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- gs()->movq(scratch.AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
- movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
-}
-
-// Slowpath entered when Thread::Current()->_exception is non-null
-class X86_64ExceptionSlowPath FINAL : public SlowPath {
- public:
- explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const size_t stack_adjust_;
-};
-
-void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
- j(kNotEqual, slow->Entry());
-}
-
-void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
- X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- // Note: the return value is dead
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception as argument in RDI
- __ gs()->movq(CpuRegister(RDI),
- Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
- // this call should never return
- __ int3();
-#undef __
-}
-
void X86_64Assembler::AddConstantArea() {
ArrayRef<const int32_t> area = constant_area_.GetBuffer();
for (size_t i = 0, e = area.size(); i < e; i++) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index d298da2e6c..370f49cb05 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -333,20 +333,11 @@ class NearLabel : private Label {
};
-class X86_64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
+class X86_64Assembler FINAL : public Assembler {
public:
explicit X86_64Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {}
virtual ~X86_64Assembler() {}
- size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
- DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
- void FinalizeCode() OVERRIDE {
- Assembler::FinalizeCode();
- }
- void FinalizeInstructions(const MemoryRegion& region) {
- Assembler::FinalizeInstructions(region);
- }
-
/*
* Emit Machine Instructions.
*/
@@ -709,124 +700,6 @@ class X86_64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer
}
void Bind(NearLabel* label);
- //
- // Overridden common assembler high-level functionality
- //
-
- // Emit code that will create an activation on the stack
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
- void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
-
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
- void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
-
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
- void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
-
- // Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size);
-
- void CopyRawPtrFromThread(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
-
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void MemoryBarrier(ManagedRegister) OVERRIDE;
-
- // Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src);
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 788c7253cf..36c966b3cf 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -22,7 +22,9 @@
#include "base/bit_utils.h"
#include "base/stl_util.h"
+#include "jni_macro_assembler_x86_64.h"
#include "utils/assembler_test.h"
+#include "utils/jni_macro_assembler_test.h"
namespace art {
@@ -1485,6 +1487,62 @@ TEST_F(AssemblerX86_64Test, SetCC) {
DriverFn(&setcc_test_fn, "setcc");
}
+TEST_F(AssemblerX86_64Test, MovzxbRegs) {
+ DriverStr(Repeatrb(&x86_64::X86_64Assembler::movzxb, "movzbl %{reg2}, %{reg1}"), "movzxb");
+}
+
+TEST_F(AssemblerX86_64Test, MovsxbRegs) {
+ DriverStr(Repeatrb(&x86_64::X86_64Assembler::movsxb, "movsbl %{reg2}, %{reg1}"), "movsxb");
+}
+
+TEST_F(AssemblerX86_64Test, Repnescasw) {
+ GetAssembler()->repne_scasw();
+ const char* expected = "repne scasw\n";
+ DriverStr(expected, "Repnescasw");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsw) {
+ GetAssembler()->repe_cmpsw();
+ const char* expected = "repe cmpsw\n";
+ DriverStr(expected, "Repecmpsw");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsl) {
+ GetAssembler()->repe_cmpsl();
+ const char* expected = "repe cmpsl\n";
+ DriverStr(expected, "Repecmpsl");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsq) {
+ GetAssembler()->repe_cmpsq();
+ const char* expected = "repe cmpsq\n";
+ DriverStr(expected, "Repecmpsq");
+}
+
+TEST_F(AssemblerX86_64Test, Cmpb) {
+ GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
+ x86_64::Immediate(0));
+ const char* expected = "cmpb $0, 128(%RDI)\n";
+ DriverStr(expected, "cmpb");
+}
+
+class JNIMacroAssemblerX86_64Test : public JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler> {
+ public:
+ using Base = JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler>;
+
+ protected:
+ // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
+ std::string GetArchitectureString() OVERRIDE {
+ return "x86_64";
+ }
+
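+  // Options handed to the disassembler (objdump) when verifying the emitted code;
+  // assumed from the flag syntax, which matches objdump's -b/-m/-M options.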
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
+ }
+};
+
static x86_64::X86_64ManagedRegister ManagedFromCpu(x86_64::Register r) {
return x86_64::X86_64ManagedRegister::FromCpuRegister(r);
}
@@ -1493,8 +1551,8 @@ static x86_64::X86_64ManagedRegister ManagedFromFpu(x86_64::FloatRegister r) {
return x86_64::X86_64ManagedRegister::FromXmmRegister(r);
}
-std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string buildframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -1536,12 +1594,12 @@ std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBU
return str.str();
}
-TEST_F(AssemblerX86_64Test, BuildFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, BuildFrame) {
DriverFn(&buildframe_test_fn, "BuildFrame");
}
-std::string removeframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string removeframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -1567,12 +1625,13 @@ std::string removeframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIB
return str.str();
}
-TEST_F(AssemblerX86_64Test, RemoveFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, RemoveFrame) {
DriverFn(&removeframe_test_fn, "RemoveFrame");
}
-std::string increaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string increaseframe_test_fn(
+ JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
assembler->IncreaseFrameSize(0U);
assembler->IncreaseFrameSize(kStackAlignment);
assembler->IncreaseFrameSize(10 * kStackAlignment);
@@ -1586,12 +1645,13 @@ std::string increaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTR
return str.str();
}
-TEST_F(AssemblerX86_64Test, IncreaseFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, IncreaseFrame) {
DriverFn(&increaseframe_test_fn, "IncreaseFrame");
}
-std::string decreaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string decreaseframe_test_fn(
+ JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
assembler->DecreaseFrameSize(0U);
assembler->DecreaseFrameSize(kStackAlignment);
assembler->DecreaseFrameSize(10 * kStackAlignment);
@@ -1605,47 +1665,8 @@ std::string decreaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTR
return str.str();
}
-TEST_F(AssemblerX86_64Test, DecreaseFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, DecreaseFrame) {
DriverFn(&decreaseframe_test_fn, "DecreaseFrame");
}
-TEST_F(AssemblerX86_64Test, MovzxbRegs) {
- DriverStr(Repeatrb(&x86_64::X86_64Assembler::movzxb, "movzbl %{reg2}, %{reg1}"), "movzxb");
-}
-
-TEST_F(AssemblerX86_64Test, MovsxbRegs) {
- DriverStr(Repeatrb(&x86_64::X86_64Assembler::movsxb, "movsbl %{reg2}, %{reg1}"), "movsxb");
-}
-
-TEST_F(AssemblerX86_64Test, Repnescasw) {
- GetAssembler()->repne_scasw();
- const char* expected = "repne scasw\n";
- DriverStr(expected, "Repnescasw");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsw) {
- GetAssembler()->repe_cmpsw();
- const char* expected = "repe cmpsw\n";
- DriverStr(expected, "Repecmpsw");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsl) {
- GetAssembler()->repe_cmpsl();
- const char* expected = "repe cmpsl\n";
- DriverStr(expected, "Repecmpsl");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsq) {
- GetAssembler()->repe_cmpsq();
- const char* expected = "repe cmpsq\n";
- DriverStr(expected, "Repecmpsq");
-}
-
-TEST_F(AssemblerX86_64Test, Cmpb) {
- GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
- x86_64::Immediate(0));
- const char* expected = "cmpb $0, 128(%RDI)\n";
- DriverStr(expected, "cmpb");
-}
-
} // namespace art
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
new file mode 100644
index 0000000000..47fb59b1d8
--- /dev/null
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -0,0 +1,603 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_x86_64.h"
+
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "memory_region.h"
+#include "thread.h"
+
+namespace art {
+namespace x86_64 {
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::X86_64Core(static_cast<int>(reg));
+}
+static dwarf::Reg DWARFReg(FloatRegister reg) {
+ return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
+}
+
+constexpr size_t kFramePointerSize = 8;
+
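+// Every instruction emitted through `__` goes to the wrapped X86_64Assembler
+// (asm_ is the member provided by the JNIMacroAssemblerFwd base).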
+#define __ asm_.
+
+void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> spill_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ DCHECK_EQ(CodeSize(), 0U); // Nothing emitted yet.
+ cfi().SetCurrentCFAOffset(8); // Return address on stack.
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ int gpr_count = 0;
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsCpuRegister()) {
+ __ pushq(spill.AsCpuRegister());
+ gpr_count++;
+ cfi().AdjustCFAOffset(kFramePointerSize);
+ cfi().RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
+ }
+ }
+  // The return address is already on the stack; reserve the rest of the frame (incl. method slot).
+ int64_t rest_of_frame = static_cast<int64_t>(frame_size)
+ - (gpr_count * kFramePointerSize)
+ - kFramePointerSize /*return address*/;
+ __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
+ cfi().AdjustCFAOffset(rest_of_frame);
+
+ // spill xmms
+ int64_t offset = rest_of_frame;
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsXmmRegister()) {
+ offset -= sizeof(double);
+ __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
+ cfi().RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
+ }
+ }
+
+ static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
+ "Unexpected frame pointer size.");
+
+ __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
+
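+  // Write register-passed arguments out to their argument slots above the new
+  // frame; frame_size + spill offset addresses the caller-side slot.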
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ if (spill.AsX86_64().IsCpuRegister()) {
+ if (spill.getSize() == 8) {
+ __ movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsCpuRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ __ movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsCpuRegister());
+ }
+ } else {
+ if (spill.getSize() == 8) {
+ __ movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsXmmRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ __ movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsXmmRegister());
+ }
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> spill_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+ int gpr_count = 0;
+ // unspill xmms
+ int64_t offset = static_cast<int64_t>(frame_size)
+ - (spill_regs.size() * kFramePointerSize)
+ - 2 * kFramePointerSize;
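+  // Walk the XMM save area laid out by BuildFrame; offset is advanced before each
+  // load, so it starts one slot below the lowest spilled XMM register.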
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsXmmRegister()) {
+ offset += sizeof(double);
+ __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
+ cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
+ } else {
+ gpr_count++;
+ }
+ }
+ int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
+ __ addq(CpuRegister(RSP), Immediate(adjust));
+ cfi().AdjustCFAOffset(-adjust);
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsCpuRegister()) {
+ __ popq(spill.AsCpuRegister());
+ cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
+ cfi().Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
+ }
+ }
+ __ ret();
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
+ cfi().AdjustCFAOffset(adjust);
+}
+
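+// Kept as a free function so that X86_64ExceptionSlowPath::Emit() below can reuse
+// it with a plain X86_64Assembler.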
+static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ assembler->addq(CpuRegister(RSP), Immediate(adjust));
+ assembler->cfi().AdjustCFAOffset(-adjust);
+}
+
+void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ DecreaseFrameSizeImpl(adjust, &asm_);
+}
+
+void X86_64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCpuRegister()) {
+    if (size == 4) {
+ __ movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
+ } else {
+ CHECK_EQ(8u, size);
+ __ movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
+ }
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(0u, size);
+ __ movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
+ __ movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
+ src.AsRegisterPairHigh());
+ } else if (src.IsX87Register()) {
+ if (size == 4) {
+ __ fstps(Address(CpuRegister(RSP), offs));
+ } else {
+ __ fstpl(Address(CpuRegister(RSP), offs));
+ }
+ } else {
+ CHECK(src.IsXmmRegister());
+ if (size == 4) {
+ __ movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
+ } else {
+ __ movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ CHECK(src.IsCpuRegister());
+ __ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ CHECK(src.IsCpuRegister());
+ __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
+ uint32_t imm,
+ ManagedRegister) {
+ __ movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq?
+}
+
+void X86_64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
+ __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
+ __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
+}
+
+void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
+ ManagedRegister /*src*/,
+ FrameOffset /*in_off*/,
+ ManagedRegister /*scratch*/) {
+  UNIMPLEMENTED(FATAL); // This case currently exists only for ARM.
+}
+
+void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+    if (size == 4) {
+ __ movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+ } else {
+ CHECK_EQ(8u, size);
+ __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+ }
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(0u, size);
+ __ movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
+ __ movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ flds(Address(CpuRegister(RSP), src));
+ } else {
+ __ fldl(Address(CpuRegister(RSP), src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
+ } else {
+ __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
+ ThreadOffset64 src, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ gs()->flds(Address::Absolute(src, true));
+ } else {
+ __ gs()->fldl(Address::Absolute(src, true));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
+ } else {
+ __ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ CHECK(dest.IsCpuRegister());
+ __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+}
+
+void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+ if (unpoison_reference) {
+ __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
+ }
+}
+
+void X86_64JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister());
+ __ movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+}
+
+void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ CHECK(dest.IsCpuRegister());
+ __ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
+}
+
+void X86_64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ X86_64ManagedRegister reg = mreg.AsX86_64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
+ } else {
+ __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86_64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ X86_64ManagedRegister reg = mreg.AsX86_64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
+ } else {
+ __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ if (!dest.Equals(src)) {
+ if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+ __ movq(dest.AsCpuRegister(), src.AsCpuRegister());
+ } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+ // Pass via stack and pop X87 register
+      __ subq(CpuRegister(RSP), Immediate(16));
+ if (size == 4) {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstps(Address(CpuRegister(RSP), 0));
+ __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
+ } else {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstpl(Address(CpuRegister(RSP), 0));
+ __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
+ }
+ __ addq(CpuRegister(RSP), Immediate(16));
+ } else {
+ // TODO: x87, SSE
+ UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
+ __ movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ __ gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
+ Store(fr_offs, scratch, 8);
+}
+
+void X86_64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ Load(scratch, fr_offs, 8);
+ __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ if (scratch.IsCpuRegister() && size == 8) {
+ Load(scratch, src, 4);
+ Store(dest, scratch, 4);
+ Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+ Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+ } else {
+ Load(scratch, src, size);
+ Store(dest, scratch, size);
+ }
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ ManagedRegister /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void X86_64JNIMacroAssembler::Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK(scratch.IsNoRegister());
+ CHECK_EQ(size, 4u);
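+  // Move the value through the stack with pushq/popq; no scratch register needed.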
+ __ pushq(Address(CpuRegister(RSP), src));
+ __ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ __ movq(scratch, Address(CpuRegister(RSP), src_base));
+ __ movq(scratch, Address(scratch, src_offset));
+ __ movq(Address(CpuRegister(RSP), dest), scratch);
+}
+
+void X86_64JNIMacroAssembler::Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK_EQ(size, 4u);
+ CHECK(scratch.IsNoRegister());
+ __ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
+ __ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ CHECK_EQ(dest.Int32Value(), src.Int32Value());
+ __ movq(scratch, Address(CpuRegister(RSP), src));
+ __ pushq(Address(scratch, src_offset));
+ __ popq(Address(scratch, dest_offset));
+}
+
+void X86_64JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
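+  // mfence orders all earlier loads and stores, giving a full two-way barrier.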
+ __ mfence();
+}
+
+void X86_64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+ X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+ if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
+ // Use out_reg as indicator of null.
+ in_reg = out_reg;
+ // TODO: movzwl
+ __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ }
+ CHECK(in_reg.IsCpuRegister());
+ CHECK(out_reg.IsCpuRegister());
+ VerifyObject(in_reg, null_allowed);
+ if (null_allowed) {
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ }
+}
+
+void X86_64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ if (null_allowed) {
+ Label null_arg;
+ __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ }
+ Store(out_off, scratch, 8);
+}
+
+// Given a handle scope entry, load the associated reference.
+void X86_64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+ X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+ CHECK(out_reg.IsCpuRegister());
+ CHECK(in_reg.IsCpuRegister());
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+ __ Bind(&null_arg);
+}
+
+void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86_64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+ X86_64ManagedRegister base = mbase.AsX86_64();
+ CHECK(base.IsCpuRegister());
+ __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
+ // TODO: place reference map on call
+}
+
+void X86_64JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+ __ movq(scratch, Address(CpuRegister(RSP), base));
+ __ call(Address(scratch, offset));
+}
+
+void X86_64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
+ __ gs()->call(Address::Absolute(offset, true));
+}
+
+void X86_64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
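+  // The current Thread* is reachable at a fixed offset from the GS segment base.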
+ __ gs()->movq(tr.AsX86_64().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
+}
+
+void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ __ gs()->movq(scratch.AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
+ __ movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
+}
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class X86_64ExceptionSlowPath FINAL : public SlowPath {
+ public:
+ explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const size_t stack_adjust_;
+};
+
+void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
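+  // The enqueued slow path is emitted when the buffer is finalized (FinalizeCode()).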
+ X86_64ExceptionSlowPath* slow = new (__ GetArena()) X86_64ExceptionSlowPath(stack_adjust);
+ __ GetBuffer()->EnqueueSlowPath(slow);
+  __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
+                Immediate(0));
+ __ j(kNotEqual, slow->Entry());
+}
+
+#undef __
+
+void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
+ X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ // Note: the return value is dead
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSizeImpl(stack_adjust_, sp_asm);
+ }
+ // Pass exception as argument in RDI
+ __ gs()->movq(CpuRegister(RDI),
+ Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
+  // This call should never return.
+ __ int3();
+#undef __
+}
+
+} // namespace x86_64
+} // namespace art
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
new file mode 100644
index 0000000000..cc4e57c999
--- /dev/null
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
+#define ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
+
+#include <vector>
+
+#include "assembler_x86_64.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "offsets.h"
+#include "utils/array_ref.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace x86_64 {
+
+class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
+ PointerSize::k64> {
+ public:
+ explicit X86_64JNIMacroAssembler(ArenaAllocator* arena)
+ : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(arena) {}
+ virtual ~X86_64JNIMacroAssembler() {}
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+
+ // Copying routines
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current()
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load it into dst.
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset]
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_