-rw-r--r--  compiler/Android.mk | 1
-rw-r--r--  compiler/driver/compiler_driver.cc | 4
-rw-r--r--  compiler/driver/compiler_driver.h | 10
-rw-r--r--  compiler/optimizing/builder.cc | 25
-rw-r--r--  compiler/optimizing/code_generator.cc | 4
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 91
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 1205
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 236
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 92
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 87
-rw-r--r--  compiler/optimizing/codegen_test.cc | 163
-rw-r--r--  compiler/optimizing/graph_visualizer.h | 2
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 2
-rw-r--r--  compiler/optimizing/nodes.h | 14
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 5
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h | 1
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h | 7
-rw-r--r--  compiler/utils/assembler.h | 1
-rw-r--r--  compiler/utils/assembler_test.h | 13
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 11
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 10
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc | 4
-rw-r--r--  dex2oat/dex2oat.cc | 6
-rw-r--r--  disassembler/disassembler.cc | 2
-rw-r--r--  disassembler/disassembler_arm.cc | 2
-rw-r--r--  disassembler/disassembler_arm64.cc | 2
-rw-r--r--  disassembler/disassembler_mips.cc | 2
-rw-r--r--  disassembler/disassembler_x86.cc | 5
-rw-r--r--  runtime/arch/arm64/context_arm64.h | 2
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 2
-rw-r--r--  runtime/arch/stub_test.cc | 2
-rw-r--r--  runtime/base/casts.h | 13
-rw-r--r--  runtime/base/logging.h | 2
-rw-r--r--  runtime/base/macros.h | 26
-rw-r--r--  runtime/base/mutex-inl.h | 44
-rw-r--r--  runtime/base/mutex.cc | 50
-rw-r--r--  runtime/base/mutex.h | 3
-rw-r--r--  runtime/base/stringpiece.cc | 2
-rw-r--r--  runtime/base/value_object.h | 12
-rw-r--r--  runtime/check_reference_map_visitor.h | 2
-rw-r--r--  runtime/class_linker.cc | 3
-rw-r--r--  runtime/debugger.cc | 4
-rw-r--r--  runtime/dex_instruction_visitor_test.cc | 1
-rw-r--r--  runtime/dex_method_iterator_test.cc | 2
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 2
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 3
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 3
-rw-r--r--  runtime/gc/heap.cc | 10
-rw-r--r--  runtime/gc/heap.h | 3
-rw-r--r--  runtime/gc/space/malloc_space.h | 2
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 8
-rw-r--r--  runtime/indirect_reference_table.cc | 10
-rw-r--r--  runtime/interpreter/interpreter_common.h | 1
-rw-r--r--  runtime/mirror/object.h | 1
-rw-r--r--  runtime/native_bridge_art_interface.cc | 5
-rw-r--r--  runtime/native_bridge_art_interface.h | 2
-rw-r--r--  runtime/offsets.cc | 2
-rw-r--r--  runtime/offsets.h | 3
-rw-r--r--  runtime/profiler.cc | 2
-rw-r--r--  runtime/quick_exception_handler.cc | 3
-rw-r--r--  runtime/runtime.cc | 33
-rw-r--r--  runtime/runtime.h | 10
-rw-r--r--  runtime/signal_catcher.cc | 2
-rw-r--r--  runtime/trace.cc | 3
-rw-r--r--  sigchainlib/sigchain_dummy.cc | 5
-rw-r--r--  test/115-native-bridge/run | 4
-rw-r--r--  test/414-optimizing-arith-sub/expected.txt | 0
-rw-r--r--  test/414-optimizing-arith-sub/info.txt | 1
-rw-r--r--  test/414-optimizing-arith-sub/src/Main.java | 170
-rw-r--r--  test/800-smali/build | 32
-rw-r--r--  test/800-smali/info.txt | 2
-rw-r--r--  test/800-smali/smali/b_17790197.smali (renamed from test/800-smali/src/b_17790197.smali) | 0
-rw-r--r--  test/Android.libarttest.mk | 7
-rw-r--r--  test/Android.libnativebridgetest.mk | 2
-rw-r--r--  test/Android.run-test.mk | 39
-rwxr-xr-x  test/run-test | 3
76 files changed, 2199 insertions, 351 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 25b23a2ee9..172c96cfde 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -88,6 +88,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/builder.cc \
optimizing/code_generator.cc \
optimizing/code_generator_arm.cc \
+ optimizing/code_generator_arm64.cc \
optimizing/code_generator_x86.cc \
optimizing/code_generator_x86_64.cc \
optimizing/constant_folding.cc \
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 452868870d..a60c5bc98e 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -347,15 +347,11 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
image_(image),
image_classes_(image_classes),
thread_count_(thread_count),
- start_ns_(0),
stats_(new AOTCompilationStats),
dump_stats_(dump_stats),
dump_passes_(dump_passes),
timings_logger_(timer),
- compiler_library_(nullptr),
compiler_context_(nullptr),
- compiler_enable_auto_elf_loading_(nullptr),
- compiler_get_method_code_addr_(nullptr),
support_boot_image_fixup_(instruction_set != kMips),
dedupe_code_("dedupe code"),
dedupe_src_mapping_table_("dedupe source mapping table"),
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 3d59ef1e80..0796f4878a 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -503,7 +503,6 @@ class CompilerDriver {
std::unique_ptr<std::set<std::string>> image_classes_;
size_t thread_count_;
- uint64_t start_ns_;
class AOTCompilationStats;
std::unique_ptr<AOTCompilationStats> stats_;
@@ -516,8 +515,6 @@ class CompilerDriver {
typedef void (*CompilerCallbackFn)(CompilerDriver& driver);
typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver);
- void* compiler_library_;
-
typedef void (*DexToDexCompilerFn)(CompilerDriver& driver,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
@@ -533,13 +530,6 @@ class CompilerDriver {
// Arena pool used by the compiler.
ArenaPool arena_pool_;
- typedef void (*CompilerEnableAutoElfLoadingFn)(CompilerDriver& driver);
- CompilerEnableAutoElfLoadingFn compiler_enable_auto_elf_loading_;
-
- typedef const void* (*CompilerGetMethodCodeAddrFn)
- (const CompilerDriver& driver, const CompiledMethod* cm, const mirror::ArtMethod* method);
- CompilerGetMethodCodeAddrFn compiler_get_method_code_addr_;
-
bool support_boot_image_fixup_;
// DeDuplication data structures, these own the corresponding byte arrays.
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 4d575cbdcf..79cbd0ee21 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -748,6 +748,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::NOT_INT: {
+ Unop_12x<HNot>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
case Instruction::ADD_INT: {
Binop_23x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -778,6 +783,16 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::SUB_FLOAT: {
+ Binop_23x<HSub>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::SUB_DOUBLE: {
+ Binop_23x<HSub>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
case Instruction::ADD_INT_2ADDR: {
Binop_12x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -828,6 +843,16 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::SUB_FLOAT_2ADDR: {
+ Binop_12x<HSub>(instruction, Primitive::kPrimFloat);
+ break;
+ }
+
+ case Instruction::SUB_DOUBLE_2ADDR: {
+ Binop_12x<HSub>(instruction, Primitive::kPrimDouble);
+ break;
+ }
+
case Instruction::MUL_INT_2ADDR: {
Binop_12x<HMul>(instruction, Primitive::kPrimInt);
break;
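
Note: the Unop_12x and Binop_23x/Binop_12x calls above are small builder templates that load the source vregs, append one HIR node, and store it back to the destination vreg. A minimal sketch of the pattern, assuming the builder's usual LoadLocal/UpdateLocal helpers and arena allocator:

    // Sketch only -- mirrors the shape of the helpers invoked above.
    template <typename T>
    void HGraphBuilder::Binop_23x(const Instruction& instruction, Primitive::Type type) {
      HInstruction* first = LoadLocal(instruction.VRegB(), type);   // first source vreg
      HInstruction* second = LoadLocal(instruction.VRegC(), type);  // second source vreg
      current_block_->AddInstruction(new (arena_) T(type, first, second));
      UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
    }

Supporting SUB_FLOAT/SUB_DOUBLE therefore only requires the new switch cases above, instantiating HSub with kPrimFloat/kPrimDouble.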
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d5cd490d13..c4286a401b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -17,6 +17,7 @@
#include "code_generator.h"
#include "code_generator_arm.h"
+#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "compiled_method.h"
@@ -281,6 +282,9 @@ CodeGenerator* CodeGenerator::Create(ArenaAllocator* allocator,
case kThumb2: {
return new (allocator) arm::CodeGeneratorARM(graph);
}
+ case kArm64: {
+ return new (allocator) arm64::CodeGeneratorARM64(graph);
+ }
case kMips:
return nullptr;
case kX86: {
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 7ed802ec7b..7b00d2f523 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1157,53 +1157,60 @@ void LocationsBuilderARM::VisitSub(HSub* sub) {
locations->SetOut(Location::RequiresRegister(), output_overlaps);
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
break;
-
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
void InstructionCodeGeneratorARM::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- if (locations->InAt(1).IsRegister()) {
- __ sub(locations->Out().As<Register>(),
- locations->InAt(0).As<Register>(),
- ShifterOperand(locations->InAt(1).As<Register>()));
+ if (second.IsRegister()) {
+ __ sub(out.As<Register>(), first.As<Register>(), ShifterOperand(second.As<Register>()));
} else {
- __ AddConstant(locations->Out().As<Register>(),
- locations->InAt(0).As<Register>(),
- -locations->InAt(1).GetConstant()->AsIntConstant()->GetValue());
+ __ AddConstant(out.As<Register>(),
+ first.As<Register>(),
+ -second.GetConstant()->AsIntConstant()->GetValue());
}
break;
}
- case Primitive::kPrimLong:
- __ subs(locations->Out().AsRegisterPairLow<Register>(),
- locations->InAt(0).AsRegisterPairLow<Register>(),
- ShifterOperand(locations->InAt(1).AsRegisterPairLow<Register>()));
- __ sbc(locations->Out().AsRegisterPairHigh<Register>(),
- locations->InAt(0).AsRegisterPairHigh<Register>(),
- ShifterOperand(locations->InAt(1).AsRegisterPairHigh<Register>()));
+ case Primitive::kPrimLong: {
+ __ subs(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ sbc(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
break;
+ }
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ vsubs(FromDToLowS(out.As<DRegister>()),
+ FromDToLowS(first.As<DRegister>()),
+ FromDToLowS(second.As<DRegister>()));
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ vsubd(out.As<DRegister>(), first.As<DRegister>(), second.As<DRegister>());
+ break;
+ }
+
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1351,17 +1358,33 @@ void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instructi
// Nothing to do, the parameter is already at its location.
}
-void LocationsBuilderARM::VisitNot(HNot* instruction) {
+void LocationsBuilderARM::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
-void InstructionCodeGeneratorARM::VisitNot(HNot* instruction) {
- LocationSummary* locations = instruction->GetLocations();
- __ eor(locations->Out().As<Register>(),
- locations->InAt(0).As<Register>(), ShifterOperand(1));
+void InstructionCodeGeneratorARM::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
+ Location out = locations->Out();
+ Location in = locations->InAt(0);
+ switch (not_->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ eor(out.As<Register>(), in.As<Register>(), ShifterOperand(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ mvn(out.As<Register>(), ShifterOperand(in.As<Register>()));
+ break;
+
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Not yet implemented type for not operation " << not_->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
}
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
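
Note: FromDToLowS in the float case above exploits VFP register aliasing on 32-bit ARM, where S<2n> overlays the low half of D<n>, so a single-precision value held in a D register can be handed straight to vsubs. A plausible definition, matching how code_generator_arm.cc derives it:

    // D0 -> S0, D1 -> S2, ...: the S register aliasing the low 32 bits of a D register.
    static SRegister FromDToLowS(DRegister reg) {
      return static_cast<SRegister>(reg * 2);
    }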
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
new file mode 100644
index 0000000000..79528ac128
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -0,0 +1,1205 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "code_generator_arm64.h"
+
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
+#include "mirror/array-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/class.h"
+#include "thread.h"
+#include "utils/arm64/assembler_arm64.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+
+
+using namespace vixl; // NOLINT(build/namespaces)
+
+#ifdef __
+#error "ARM64 Codegen VIXL macro-assembler macro already defined."
+#endif
+
+
+namespace art {
+
+namespace arm64 {
+
+static bool IsFPType(Primitive::Type type) {
+ return type == Primitive::kPrimFloat || type == Primitive::kPrimDouble;
+}
+
+// TODO: clean-up some of the constant definitions.
+static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
+static constexpr int kCurrentMethodStackOffset = 0;
+
+namespace {
+// Convenience helpers to ease conversion to and from VIXL operands.
+
+int VIXLRegCodeFromART(int code) {
+ // TODO: static check?
+ DCHECK_EQ(SP, 31);
+ DCHECK_EQ(WSP, 31);
+ DCHECK_EQ(XZR, 32);
+ DCHECK_EQ(WZR, 32);
+ if (code == SP) {
+ return vixl::kSPRegInternalCode;
+ }
+ if (code == XZR) {
+ return vixl::kZeroRegCode;
+ }
+ return code;
+}
+
+int ARTRegCodeFromVIXL(int code) {
+ // TODO: static check?
+ DCHECK_EQ(SP, 31);
+ DCHECK_EQ(WSP, 31);
+ DCHECK_EQ(XZR, 32);
+ DCHECK_EQ(WZR, 32);
+ if (code == vixl::kSPRegInternalCode) {
+ return SP;
+ }
+ if (code == vixl::kZeroRegCode) {
+ return XZR;
+ }
+ return code;
+}
+
+Register XRegisterFrom(Location location) {
+ return Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
+}
+
+Register WRegisterFrom(Location location) {
+ return Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
+}
+
+Register RegisterFrom(Location location, Primitive::Type type) {
+ DCHECK(type != Primitive::kPrimVoid && !IsFPType(type));
+ return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
+}
+
+Register OutputRegister(HInstruction* instr) {
+ return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
+}
+
+Register InputRegisterAt(HInstruction* instr, int input_index) {
+ return RegisterFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+int64_t Int64ConstantFrom(Location location) {
+ HConstant* instr = location.GetConstant();
+ return instr->IsIntConstant() ? instr->AsIntConstant()->GetValue()
+ : instr->AsLongConstant()->GetValue();
+}
+
+Operand OperandFrom(Location location, Primitive::Type type) {
+ if (location.IsRegister()) {
+ return Operand(RegisterFrom(location, type));
+ } else {
+ return Operand(Int64ConstantFrom(location));
+ }
+}
+
+Operand InputOperandAt(HInstruction* instr, int input_index) {
+ return OperandFrom(instr->GetLocations()->InAt(input_index),
+ instr->InputAt(input_index)->GetType());
+}
+
+MemOperand StackOperandFrom(Location location) {
+ return MemOperand(sp, location.GetStackIndex());
+}
+
+MemOperand HeapOperand(const Register& base, Offset offset) {
+ // A heap reference must be 32 bits wide, so it fits in a W register.
+ DCHECK(base.IsW());
+ return MemOperand(base.X(), offset.SizeValue());
+}
+
+MemOperand HeapOperandFrom(Location location, Primitive::Type type, Offset offset) {
+ return HeapOperand(RegisterFrom(location, type), offset);
+}
+
+Location LocationFrom(const Register& reg) {
+ return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
+}
+
+} // namespace
+
+inline Condition ARM64Condition(IfCondition cond) {
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne;
+ case kCondLT: return lt;
+ case kCondLE: return le;
+ case kCondGT: return gt;
+ case kCondGE: return ge;
+ default:
+ LOG(FATAL) << "Unknown if condition";
+ }
+ return nv; // Unreachable.
+}
+
+static const Register kRuntimeParameterCoreRegisters[] = { x0, x1, x2, x3, x4, x5, x6, x7 };
+static constexpr size_t kRuntimeParameterCoreRegistersLength =
+ arraysize(kRuntimeParameterCoreRegisters);
+static const FPRegister kRuntimeParameterFpuRegisters[] = { };
+static constexpr size_t kRuntimeParameterFpuRegistersLength = 0;
+
+class InvokeRuntimeCallingConvention : public CallingConvention<Register, FPRegister> {
+ public:
+ static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+
+ InvokeRuntimeCallingConvention()
+ : CallingConvention(kRuntimeParameterCoreRegisters,
+ kRuntimeParameterCoreRegistersLength,
+ kRuntimeParameterFpuRegisters,
+ kRuntimeParameterFpuRegistersLength) {}
+
+ Location GetReturnLocation(Primitive::Type return_type);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
+};
+
+Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
+ DCHECK_NE(return_type, Primitive::kPrimVoid);
+ if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented return type " << return_type;
+ }
+ return LocationFrom(x0);
+}
+
+#define __ reinterpret_cast<Arm64Assembler*>(codegen->GetAssembler())->vixl_masm_->
+
+class SlowPathCodeARM64 : public SlowPathCode {
+ public:
+ SlowPathCodeARM64() : entry_label_(), exit_label_() {}
+
+ vixl::Label* GetEntryLabel() { return &entry_label_; }
+ vixl::Label* GetExitLabel() { return &exit_label_; }
+
+ private:
+ vixl::Label entry_label_;
+ vixl::Label exit_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
+};
+
+class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
+ Location index_location,
+ Location length_location)
+ : instruction_(instruction),
+ index_location_(index_location),
+ length_location_(length_location) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = reinterpret_cast<CodeGeneratorARM64*>(codegen);
+ __ Bind(GetEntryLabel());
+ InvokeRuntimeCallingConvention calling_convention;
+ arm64_codegen->MoveHelper(LocationFrom(calling_convention.GetRegisterAt(0)),
+ index_location_, Primitive::kPrimInt);
+ arm64_codegen->MoveHelper(LocationFrom(calling_convention.GetRegisterAt(1)),
+ length_location_, Primitive::kPrimInt);
+ size_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pThrowArrayBounds).SizeValue();
+ __ Ldr(lr, MemOperand(tr, offset));
+ __ Blr(lr);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HBoundsCheck* const instruction_;
+ const Location index_location_;
+ const Location length_location_;
+
+ DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
+};
+
+class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ __ Bind(GetEntryLabel());
+ int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pThrowNullPointer).Int32Value();
+ __ Ldr(lr, MemOperand(tr, offset));
+ __ Blr(lr);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ }
+
+ private:
+ HNullCheck* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
+};
+
+class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit SuspendCheckSlowPathARM64(HSuspendCheck* instruction,
+ HBasicBlock* successor)
+ : instruction_(instruction), successor_(successor) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ size_t offset = QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pTestSuspend).SizeValue();
+ __ Bind(GetEntryLabel());
+ __ Ldr(lr, MemOperand(tr, offset));
+ __ Blr(lr);
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ __ B(GetReturnLabel());
+ }
+
+ vixl::Label* GetReturnLabel() {
+ DCHECK(successor_ == nullptr);
+ return &return_label_;
+ }
+
+ private:
+ HSuspendCheck* const instruction_;
+ // If not null, the block to branch to after the suspend check.
+ HBasicBlock* const successor_;
+
+ // If `successor_` is null, the label to branch to after the suspend check.
+ vixl::Label return_label_;
+
+ DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
+};
+
+#undef __
+
+Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
+ Location next_location;
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented type " << type;
+ }
+
+ if (gp_index_ < calling_convention.GetNumberOfRegisters()) {
+ next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_));
+ if (type == Primitive::kPrimLong) {
+ // Double stack slot reserved on the stack.
+ stack_index_++;
+ }
+ } else { // Stack.
+ if (type == Primitive::kPrimLong) {
+ next_location = Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_));
+ // Double stack slot reserved on the stack.
+ stack_index_++;
+ } else {
+ next_location = Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_));
+ }
+ }
+ // Move to the next register/stack slot.
+ gp_index_++;
+ stack_index_++;
+ return next_location;
+}
+
+CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph)
+ : CodeGenerator(graph,
+ kNumberOfAllocatableRegisters,
+ kNumberOfAllocatableFloatingPointRegisters,
+ kNumberOfAllocatableRegisterPairs),
+ block_labels_(nullptr),
+ location_builder_(graph, this),
+ instruction_visitor_(graph, this) {}
+
+#define __ reinterpret_cast<Arm64Assembler*>(GetAssembler())->vixl_masm_->
+
+void CodeGeneratorARM64::GenerateFrameEntry() {
+ // TODO: Add proper support for the stack overflow check.
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = temps.AcquireX();
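+ // Probe a word GetStackOverflowReservedBytes(kArm64) below sp: if the stack
+ // cannot grow that far, the load faults and the fault handler turns it into
+ // a StackOverflowError (implicit stack-overflow check).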
+ __ Add(temp, sp, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
+ __ Ldr(temp, MemOperand(temp, 0));
+ RecordPcInfo(nullptr, 0);
+
+ CPURegList preserved_regs = GetFramePreservedRegisters();
+ int frame_size = GetFrameSize();
+ core_spill_mask_ |= preserved_regs.list();
+
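+ // Allocate the whole frame and store the current ArtMethod* (passed in w0)
+ // at sp[0] in one pre-indexed store, then spill the preserved registers at
+ // the top of the frame.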
+ __ Str(w0, MemOperand(sp, -frame_size, PreIndex));
+ __ PokeCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
+
+ // Stack layout:
+ // sp[frame_size - 8] : lr.
+ // ... : other preserved registers.
+ // sp[frame_size - regs_size]: first preserved register.
+ // ... : reserved frame space.
+ // sp[0] : context pointer.
+}
+
+void CodeGeneratorARM64::GenerateFrameExit() {
+ int frame_size = GetFrameSize();
+ CPURegList preserved_regs = GetFramePreservedRegisters();
+ __ PeekCPURegList(preserved_regs, frame_size - preserved_regs.TotalSizeInBytes());
+ __ Drop(frame_size);
+}
+
+void CodeGeneratorARM64::Bind(HBasicBlock* block) {
+ __ Bind(GetLabelOf(block));
+}
+
+void CodeGeneratorARM64::MoveHelper(Location destination,
+ Location source,
+ Primitive::Type type) {
+ if (source.Equals(destination)) {
+ return;
+ }
+ if (destination.IsRegister()) {
+ Register dst = RegisterFrom(destination, type);
+ if (source.IsRegister()) {
+ Register src = RegisterFrom(source, type);
+ DCHECK(dst.IsSameSizeAndType(src));
+ __ Mov(dst, src);
+ } else {
+ DCHECK(dst.Is64Bits() || !source.IsDoubleStackSlot());
+ __ Ldr(dst, StackOperandFrom(source));
+ }
+ } else {
+ DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
+ if (source.IsRegister()) {
+ __ Str(RegisterFrom(source, type), StackOperandFrom(destination));
+ } else {
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = destination.IsDoubleStackSlot() ? temps.AcquireX() : temps.AcquireW();
+ __ Ldr(temp, StackOperandFrom(source));
+ __ Str(temp, StackOperandFrom(destination));
+ }
+ }
+}
+
+void CodeGeneratorARM64::Move(HInstruction* instruction,
+ Location location,
+ HInstruction* move_for) {
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ Primitive::Type type = instruction->GetType();
+
+ if (instruction->IsIntConstant() || instruction->IsLongConstant()) {
+ int64_t value = instruction->IsIntConstant() ? instruction->AsIntConstant()->GetValue()
+ : instruction->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ Register dst = RegisterFrom(location, type);
+ DCHECK((instruction->IsIntConstant() && dst.Is32Bits()) ||
+ (instruction->IsLongConstant() && dst.Is64Bits()));
+ __ Mov(dst, value);
+ } else {
+ DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register temp = instruction->IsIntConstant() ? temps.AcquireW() : temps.AcquireX();
+ __ Mov(temp, value);
+ __ Str(temp, StackOperandFrom(location));
+ }
+
+ } else if (instruction->IsLoadLocal()) {
+ uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ MoveHelper(location, Location::StackSlot(stack_slot), type);
+ break;
+ case Primitive::kPrimLong:
+ MoveHelper(location, Location::DoubleStackSlot(stack_slot), type);
+ break;
+ default:
+ LOG(FATAL) << "Unimplemented type" << type;
+ }
+
+ } else {
+ DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
+ MoveHelper(location, locations->Out(), type);
+ }
+}
+
+size_t CodeGeneratorARM64::FrameEntrySpillSize() const {
+ return GetFramePreservedRegistersSize();
+}
+
+Location CodeGeneratorARM64::GetStackLocation(HLoadLocal* load) const {
+ Primitive::Type type = load->GetType();
+ switch (type) {
+ case Primitive::kPrimNot:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ return Location::StackSlot(GetStackSlot(load->GetLocal()));
+ case Primitive::kPrimLong:
+ return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented type " << type;
+ break;
+ case Primitive::kPrimVoid:
+ default:
+ LOG(FATAL) << "Unexpected type " << type;
+ }
+ LOG(FATAL) << "Unreachable";
+ return Location::NoLocation();
+}
+
+void CodeGeneratorARM64::MarkGCCard(Register object, Register value) {
+ UseScratchRegisterScope temps(assembler_.vixl_masm_);
+ Register card = temps.AcquireX();
+ Register temp = temps.AcquireX();
+ vixl::Label done;
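+ // Storing null needs no card mark. The card table base is biased at creation
+ // so its least significant byte equals the dirty-card value; the Strb below
+ // therefore dirties card[object >> kCardShift] by storing that base
+ // register's low byte.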
+ __ Cbz(value, &done);
+ __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
+ __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
+ __ Strb(card, MemOperand(card, temp));
+ __ Bind(&done);
+}
+
+void CodeGeneratorARM64::SetupBlockedRegisters() const {
+ // Block reserved registers:
+ // ip0 (VIXL temporary)
+ // ip1 (VIXL temporary)
+ // xSuspend (Suspend counter)
+ // lr
+ // sp is not part of the allocatable registers, so we don't need to block it.
+ CPURegList reserved_core_registers = vixl_reserved_core_registers;
+ reserved_core_registers.Combine(runtime_reserved_core_registers);
+ // TODO: See if we should instead allow allocating but preserve those if used.
+ reserved_core_registers.Combine(quick_callee_saved_registers);
+ while (!reserved_core_registers.IsEmpty()) {
+ blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
+ }
+}
+
+Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
+ if (type == Primitive::kPrimVoid) {
+ LOG(FATAL) << "Unreachable type " << type;
+ }
+
+ if (type == Primitive::kPrimFloat || type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented support for floating-point";
+ }
+
+ ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfXRegisters);
+ DCHECK_NE(reg, -1);
+ blocked_core_registers_[reg] = true;
+
+ if (IsFPType(type)) {
+ return Location::FpuRegisterLocation(reg);
+ } else {
+ return Location::RegisterLocation(reg);
+ }
+}
+
+void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
+ stream << Arm64ManagedRegister::FromXRegister(XRegister(reg));
+}
+
+void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
+ stream << Arm64ManagedRegister::FromDRegister(DRegister(reg));
+}
+
+#undef __
+#define __ assembler_->vixl_masm_->
+
+InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
+ CodeGeneratorARM64* codegen)
+ : HGraphVisitor(graph),
+ assembler_(codegen->GetAssembler()),
+ codegen_(codegen) {}
+
+#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
+ M(ArrayGet) \
+ M(ArraySet) \
+ M(DoubleConstant) \
+ M(FloatConstant) \
+ M(Mul) \
+ M(Neg) \
+ M(NewArray) \
+ M(ParallelMove)
+
+#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
+
+enum UnimplementedInstructionBreakCode {
+#define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
+#undef ENUM_UNIMPLEMENTED_INSTRUCTION
+};
+
+#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
+ void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
+ __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
+ } \
+ void LocationsBuilderARM64::Visit##name(H##name* instr) { \
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
+ locations->SetOut(Location::Any()); \
+ }
+ FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
+#undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
+
+#undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
+
+void LocationsBuilderARM64::HandleAddSub(HBinaryOperation* instr) {
+ DCHECK(instr->IsAdd() || instr->IsSub());
+ DCHECK_EQ(instr->InputCount(), 2U);
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
+ Primitive::Type type = instr->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ }
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
+ break;
+ default:
+ LOG(FATAL) << "Unimplemented " << instr->DebugName() << " type " << type;
+ }
+}
+
+void InstructionCodeGeneratorARM64::HandleAddSub(HBinaryOperation* instr) {
+ DCHECK(instr->IsAdd() || instr->IsSub());
+
+ Primitive::Type type = instr->GetType();
+ Register dst = OutputRegister(instr);
+ Register lhs = InputRegisterAt(instr, 0);
+ Operand rhs = InputOperandAt(instr, 1);
+
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ if (instr->IsAdd()) {
+ __ Add(dst, lhs, rhs);
+ } else {
+ __ Sub(dst, lhs, rhs);
+ }
+ break;
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ LOG(FATAL) << "Unexpected add/sub type " << type;
+ break;
+ default:
+ LOG(FATAL) << "Unimplemented add/sub type " << type;
+ }
+}
+
+void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
+ HandleAddSub(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
+ HandleAddSub(instruction);
+}
+
+void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
+ __ Ldr(OutputRegister(instruction),
+ HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
+}
+
+void LocationsBuilderARM64::VisitCompare(HCompare* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitCompare(HCompare* instruction) {
+ Primitive::Type in_type = instruction->InputAt(0)->GetType();
+
+ DCHECK_EQ(in_type, Primitive::kPrimLong);
+ switch (in_type) {
+ case Primitive::kPrimLong: {
+ vixl::Label done;
+ Register result = OutputRegister(instruction);
+ Register left = InputRegisterAt(instruction, 0);
+ Operand right = InputOperandAt(instruction, 1);
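+ // result = 0 if left == right, 1 if left > right, -1 if left < right.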
+ __ Subs(result, left, right);
+ __ B(eq, &done);
+ __ Mov(result, 1);
+ __ Cneg(result, result, le);
+ __ Bind(&done);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unimplemented compare type " << in_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
+ if (instruction->NeedsMaterialization()) {
+ locations->SetOut(Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
+ if (!instruction->NeedsMaterialization()) {
+ return;
+ }
+
+ LocationSummary* locations = instruction->GetLocations();
+ Register lhs = InputRegisterAt(instruction, 0);
+ Operand rhs = InputOperandAt(instruction, 1);
+ Register res = RegisterFrom(locations->Out(), instruction->GetType());
+ Condition cond = ARM64Condition(instruction->GetCondition());
+
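+ // Materialize the condition: res = (lhs cond rhs) ? 1 : 0. The Csel selects
+ // the zero register when the inverted condition holds, and 1 otherwise.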
+ __ Cmp(lhs, rhs);
+ __ Csel(res, vixl::Assembler::AppropriateZeroRegFor(res), Operand(1), InvertCondition(cond));
+}
+
+#define FOR_EACH_CONDITION_INSTRUCTION(M) \
+ M(Equal) \
+ M(NotEqual) \
+ M(LessThan) \
+ M(LessThanOrEqual) \
+ M(GreaterThan) \
+ M(GreaterThanOrEqual)
+#define DEFINE_CONDITION_VISITORS(Name) \
+void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
+void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
+FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
+#undef FOR_EACH_CONDITION_INSTRUCTION
+
+void LocationsBuilderARM64::VisitExit(HExit* exit) {
+ exit->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
+ if (kIsDebugBuild) {
+ down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
+ __ Brk(0); // TODO: Introduce special markers for such code locations.
+ }
+}
+
+void LocationsBuilderARM64::VisitGoto(HGoto* got) {
+ got->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
+ HBasicBlock* successor = got->GetSuccessor();
+ // TODO: Support suspend check emission.
+ if (!codegen_->GoesToNextBlock(got->GetBlock(), successor)) {
+ __ B(codegen_->GetLabelOf(successor));
+ }
+}
+
+void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
+ HInstruction* cond = if_instr->InputAt(0);
+ DCHECK(cond->IsCondition());
+ if (cond->AsCondition()->NeedsMaterialization()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
+ HInstruction* cond = if_instr->InputAt(0);
+ DCHECK(cond->IsCondition());
+ HCondition* condition = cond->AsCondition();
+ vixl::Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
+ vixl::Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
+
+ // TODO: Support constant condition input in VisitIf.
+
+ if (condition->NeedsMaterialization()) {
+ // The condition instruction has been materialized, compare the output to 0.
+ Location cond_val = if_instr->GetLocations()->InAt(0);
+ DCHECK(cond_val.IsRegister());
+ __ Cbnz(InputRegisterAt(if_instr, 0), true_target);
+
+ } else {
+ // The condition instruction has not been materialized, use its inputs as
+ // the comparison and its condition as the branch condition.
+ Register lhs = InputRegisterAt(condition, 0);
+ Operand rhs = InputOperandAt(condition, 1);
+ Condition cond = ARM64Condition(condition->GetCondition());
+ if ((cond == eq || cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
+ if (cond == eq) {
+ __ Cbz(lhs, true_target);
+ } else {
+ __ Cbnz(lhs, true_target);
+ }
+ } else {
+ __ Cmp(lhs, rhs);
+ __ B(cond, true_target);
+ }
+ }
+
+ if (!codegen_->GoesToNextBlock(if_instr->GetBlock(), if_instr->IfFalseSuccessor())) {
+ __ B(false_target);
+ }
+}
+
+void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
+ Primitive::Type res_type = instruction->GetType();
+ Register res = OutputRegister(instruction);
+ Register obj = InputRegisterAt(instruction, 0);
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+
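+ // Sub-word loads match Java semantics: byte and short sign-extend
+ // (Ldrsb/Ldrsh) while boolean and char zero-extend (Ldrb/Ldrh).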
+ switch (res_type) {
+ case Primitive::kPrimBoolean: {
+ __ Ldrb(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimByte: {
+ __ Ldrsb(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimShort: {
+ __ Ldrsh(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimChar: {
+ __ Ldrh(res, MemOperand(obj, offset));
+ break;
+ }
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong: { // TODO: support volatile.
+ DCHECK(res.IsX() == (res_type == Primitive::kPrimLong));
+ __ Ldr(res, MemOperand(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register res_type " << res_type;
+ break;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable res_type " << res_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
+ Register obj = InputRegisterAt(instruction, 0);
+ Register value = InputRegisterAt(instruction, 1);
+ Primitive::Type field_type = instruction->InputAt(1)->GetType();
+ uint32_t offset = instruction->GetFieldOffset().Uint32Value();
+
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte: {
+ __ Strb(value, MemOperand(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimChar: {
+ __ Strh(value, MemOperand(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ case Primitive::kPrimLong: {
+ DCHECK(value.IsX() == (field_type == Primitive::kPrimLong));
+ __ Str(value, MemOperand(obj, offset));
+
+ if (field_type == Primitive::kPrimNot) {
+ codegen_->MarkGCCard(obj, value);
+ }
+ break;
+ }
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented register type " << field_type;
+ break;
+
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable type " << field_type;
+ }
+}
+
+void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
+ locations->AddTemp(LocationFrom(x0));
+
+ InvokeDexCallingConventionVisitor calling_convention_visitor;
+ for (size_t i = 0; i < invoke->InputCount(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
+ }
+
+ Primitive::Type return_type = invoke->GetType();
+ if (return_type != Primitive::kPrimVoid) {
+ locations->SetOut(calling_convention_visitor.GetReturnLocation(return_type));
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = WRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ // Make sure that ArtMethod* is passed in W0 as per the calling convention
+ DCHECK(temp.Is(w0));
+ size_t index_in_cache = mirror::Array::DataOffset(kHeapRefSize).SizeValue() +
+ invoke->GetIndexInDexCache() * kHeapRefSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ __ Ldr(temp, MemOperand(sp, kCurrentMethodStackOffset));
+ // temp = temp->dex_cache_resolved_methods_;
+ __ Ldr(temp, MemOperand(temp.X(), mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache];
+ __ Ldr(temp, MemOperand(temp.X(), index_in_cache));
+ // lr = temp->entry_point_from_quick_compiled_code_;
+ __ Ldr(lr, MemOperand(temp.X(), mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+ // lr();
+ __ Blr(lr);
+
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ Register temp = XRegisterFrom(invoke->GetLocations()->GetTemp(0));
+ size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ Offset class_offset = mirror::Object::ClassOffset();
+ Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset();
+
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ Ldr(temp.W(), MemOperand(sp, receiver.GetStackIndex()));
+ __ Ldr(temp.W(), MemOperand(temp, class_offset.SizeValue()));
+ } else {
+ DCHECK(receiver.IsRegister());
+ __ Ldr(temp.W(), HeapOperandFrom(receiver, Primitive::kPrimNot,
+ class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ Ldr(temp.W(), MemOperand(temp, method_offset));
+ // lr = temp->GetEntryPoint();
+ __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
+ // lr();
+ __ Blr(lr);
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
+ load->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+void LocationsBuilderARM64::VisitLocal(HLocal* local) {
+ local->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitLocal(HLocal* local) {
+ DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
+}
+
+void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
+ locations->SetOut(Location::ConstantLocation(constant));
+}
+
+void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
+ DCHECK(type_index.Is(w0));
+ Register current_method = RegisterFrom(locations->GetTemp(1), Primitive::kPrimNot);
+ DCHECK(current_method.Is(w1));
+ __ Ldr(current_method, MemOperand(sp, kCurrentMethodStackOffset));
+ __ Mov(type_index, instruction->GetTypeIndex());
+ __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocObjectWithAccessCheck).Int32Value()));
+ __ Blr(lr);
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void LocationsBuilderARM64::VisitNot(HNot* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
+ switch (instruction->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), Operand(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
+ break;
+
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Not yet implemented type for not operation " << instruction->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
+ }
+}
+
+void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
+ SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
+ codegen_->AddSlowPath(slow_path);
+
+ LocationSummary* locations = instruction->GetLocations();
+ Location obj = locations->InAt(0);
+ if (obj.IsRegister()) {
+ __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
+ } else {
+ DCHECK(obj.IsConstant()) << obj;
+ DCHECK_EQ(obj.GetConstant()->AsIntConstant()->GetValue(), 0);
+ __ B(slow_path->GetEntryLabel());
+ }
+}
+
+void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ }
+ locations->SetOut(location);
+}
+
+void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
+ // Nothing to do, the parameter is already at its location.
+}
+
+void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
+ locations->SetInAt(i, Location::Any());
+ }
+ locations->SetOut(Location::Any());
+}
+
+void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
+ LOG(FATAL) << "Unreachable";
+}
+
+void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
+ Primitive::Type return_type = instruction->InputAt(0)->GetType();
+
+ if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented return type " << return_type;
+ }
+
+ locations->SetInAt(0, LocationFrom(x0));
+}
+
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
+ if (kIsDebugBuild) {
+ Primitive::Type type = instruction->InputAt(0)->GetType();
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ DCHECK(InputRegisterAt(instruction, 0).Is(w0));
+ break;
+
+ case Primitive::kPrimLong:
+ DCHECK(InputRegisterAt(instruction, 0).Is(x0));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented return type " << type;
+ }
+ }
+ codegen_->GenerateFrameExit();
+ __ Br(lr);
+}
+
+void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ instruction->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ codegen_->GenerateFrameExit();
+ __ Br(lr);
+}
+
+void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
+ Primitive::Type field_type = store->InputAt(1)->GetType();
+ switch (field_type) {
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ case Primitive::kPrimLong:
+ locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented local type " << field_type;
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
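+ // Nothing to do, this is driven by the code generator.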
+}
+
+void LocationsBuilderARM64::VisitSub(HSub* instruction) {
+ HandleAddSub(instruction);
+}
+
+void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
+ HandleAddSub(instruction);
+}
+
+void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ if (instruction->HasUses()) {
+ locations->SetOut(Location::SameAsFirstInput());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ BoundsCheckSlowPathARM64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(
+ instruction, locations->InAt(0), locations->InAt(1));
+ codegen_->AddSlowPath(slow_path);
+
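+ // Branch on an unsigned comparison (hs): this rejects index >= length and
+ // also negative indices, which wrap to large unsigned values.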
+ __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
+ __ B(slow_path->GetEntryLabel(), hs);
+}
+
+void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
+ // TODO: Improve support for suspend checks.
+ SuspendCheckSlowPathARM64* slow_path =
+ new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, nullptr);
+ codegen_->AddSlowPath(slow_path);
+
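+ // Decrement the dedicated suspend counter register and take the slow path,
+ // which calls the pTestSuspend entrypoint, once it reaches zero or below.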
+ __ Subs(wSuspend, wSuspend, 1);
+ __ B(slow_path->GetEntryLabel(), le);
+ __ Bind(slow_path->GetReturnLabel());
+}
+
+void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
+ temp->SetLocations(nullptr);
+}
+
+void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
+ // Nothing to do, this is driven by the code generator.
+}
+
+} // namespace arm64
+} // namespace art
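
Note: every runtime call in this file (the slow paths, VisitInvokeStatic, VisitNewInstance) repeats the same two-instruction sequence: load the quick entrypoint from the thread register, then branch-and-link. A hedged sketch of that pattern as a standalone helper (the commit inlines it at each site; the helper name is illustrative only):

    // Call the quick entrypoint at `entry_point_offset` from the Thread* held in tr (x18).
    static void EmitRuntimeCall(vixl::MacroAssembler* masm, int32_t entry_point_offset) {
      masm->Ldr(vixl::lr, vixl::MemOperand(tr, entry_point_offset));  // load the entrypoint
      masm->Blr(vixl::lr);                                            // call it
    }

Each call site must still follow up with RecordPcInfo so the runtime can map the return address back to a dex PC.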
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
new file mode 100644
index 0000000000..a4003ffea5
--- /dev/null
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
+
+#include "code_generator.h"
+#include "nodes.h"
+#include "parallel_move_resolver.h"
+#include "utils/arm64/assembler_arm64.h"
+#include "a64/disasm-a64.h"
+#include "a64/macro-assembler-a64.h"
+#include "arch/arm64/quick_method_frame_info_arm64.h"
+
+namespace art {
+namespace arm64 {
+
+class CodeGeneratorARM64;
+
+static constexpr size_t kArm64WordSize = 8;
+static const vixl::Register kParameterCoreRegisters[] = {
+ vixl::x1, vixl::x2, vixl::x3, vixl::x4, vixl::x5, vixl::x6, vixl::x7
+};
+static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
+static const vixl::FPRegister kParameterFPRegisters[] = {
+ vixl::d0, vixl::d1, vixl::d2, vixl::d3, vixl::d4, vixl::d5, vixl::d6, vixl::d7
+};
+static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
+
+const vixl::Register tr = vixl::x18; // Thread Register
+const vixl::Register wSuspend = vixl::w19; // Suspend Register
+const vixl::Register xSuspend = vixl::x19;
+
+const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
+const vixl::CPURegList runtime_reserved_core_registers(tr, xSuspend, vixl::lr);
+const vixl::CPURegList quick_callee_saved_registers(vixl::CPURegister::kRegister,
+ vixl::kXRegSize,
+ kArm64CalleeSaveRefSpills);
+
+class InvokeDexCallingConvention : public CallingConvention<vixl::Register, vixl::FPRegister> {
+ public:
+ InvokeDexCallingConvention()
+ : CallingConvention(kParameterCoreRegisters,
+ kParameterCoreRegistersLength,
+ kParameterFPRegisters,
+ kParameterFPRegistersLength) {}
+
+ Location GetReturnLocation(Primitive::Type return_type) {
+ DCHECK_NE(return_type, Primitive::kPrimVoid);
+ if (return_type == Primitive::kPrimFloat || return_type == Primitive::kPrimDouble) {
+ LOG(FATAL) << "Unimplemented return type " << return_type;
+ }
+ return Location::RegisterLocation(X0);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
+};
+
+class InvokeDexCallingConventionVisitor {
+ public:
+ InvokeDexCallingConventionVisitor() : gp_index_(0), stack_index_(0) {}
+
+ Location GetNextLocation(Primitive::Type type);
+ Location GetReturnLocation(Primitive::Type return_type) {
+ return calling_convention.GetReturnLocation(return_type);
+ }
+
+ private:
+ InvokeDexCallingConvention calling_convention;
+ // The current index for core registers.
+ uint32_t gp_index_;
+ // The current stack index.
+ uint32_t stack_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
+};
+
+class InstructionCodeGeneratorARM64 : public HGraphVisitor {
+ public:
+ InstructionCodeGeneratorARM64(HGraph* graph, CodeGeneratorARM64* codegen);
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ virtual void Visit##name(H##name* instr);
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+#undef DECLARE_VISIT_INSTRUCTION
+
+ void LoadCurrentMethod(XRegister reg);
+
+ Arm64Assembler* GetAssembler() const { return assembler_; }
+
+ private:
+ void HandleAddSub(HBinaryOperation* instr);
+
+ Arm64Assembler* const assembler_;
+ CodeGeneratorARM64* const codegen_;
+
+ DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorARM64);
+};
+
+class LocationsBuilderARM64 : public HGraphVisitor {
+ public:
+ explicit LocationsBuilderARM64(HGraph* graph, CodeGeneratorARM64* codegen)
+ : HGraphVisitor(graph), codegen_(codegen) {}
+
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ virtual void Visit##name(H##name* instr);
+ FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ void HandleAddSub(HBinaryOperation* instr);
+ void HandleInvoke(HInvoke* instr);
+
+ CodeGeneratorARM64* const codegen_;
+ InvokeDexCallingConventionVisitor parameter_visitor_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM64);
+};
+
+class CodeGeneratorARM64 : public CodeGenerator {
+ public:
+ explicit CodeGeneratorARM64(HGraph* graph);
+ virtual ~CodeGeneratorARM64() { }
+
+ virtual void GenerateFrameEntry() OVERRIDE;
+ virtual void GenerateFrameExit() OVERRIDE;
+
+ static const vixl::CPURegList& GetFramePreservedRegisters() {
+ static const vixl::CPURegList frame_preserved_regs =
+ vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize, vixl::lr.Bit());
+ return frame_preserved_regs;
+ }
+ static int GetFramePreservedRegistersSize() {
+ return GetFramePreservedRegisters().TotalSizeInBytes();
+ }
+
+ virtual void Bind(HBasicBlock* block) OVERRIDE;
+
+ vixl::Label* GetLabelOf(HBasicBlock* block) const {
+ return block_labels_ + block->GetBlockId();
+ }
+
+ virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
+
+ virtual size_t GetWordSize() const OVERRIDE {
+ return kArm64WordSize;
+ }
+
+ virtual size_t FrameEntrySpillSize() const OVERRIDE;
+
+ virtual HGraphVisitor* GetLocationBuilder() OVERRIDE { return &location_builder_; }
+ virtual HGraphVisitor* GetInstructionVisitor() OVERRIDE { return &instruction_visitor_; }
+ virtual Arm64Assembler* GetAssembler() OVERRIDE { return &assembler_; }
+
+ // Emit a write barrier.
+ void MarkGCCard(vixl::Register object, vixl::Register value);
+
+ // Register allocation.
+
+ virtual void SetupBlockedRegisters() const OVERRIDE;
+ // AllocateFreeRegister() is only used when allocating registers locally
+ // during CompileBaseline().
+ virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+
+ virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
+
+ virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
+ return 0;
+ }
+
+ virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
+ return 0;
+ }
+
+ // The number of registers that can be allocated. The register allocator may
+ // decide to reserve and not use a few of them.
+ // We do not consider registers sp, xzr, wzr. They are either not allocatable
+ // (xzr, wzr), or make for poor allocatable registers (sp alignment
+ // requirements, etc.). This also facilitates our task as all other registers
+ // can easily be mapped to or from their type and index or code.
+ static const int kNumberOfAllocatableCoreRegisters = vixl::kNumberOfRegisters - 1;
+ static const int kNumberOfAllocatableFloatingPointRegisters = vixl::kNumberOfFPRegisters;
+ static const int kNumberOfAllocatableRegisters =
+ kNumberOfAllocatableCoreRegisters + kNumberOfAllocatableFloatingPointRegisters;
+ static constexpr int kNumberOfAllocatableRegisterPairs = 0;
+
+ virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
+ virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
+
+ virtual InstructionSet GetInstructionSet() const OVERRIDE {
+ return InstructionSet::kArm64;
+ }
+
+ void MoveHelper(Location destination, Location source, Primitive::Type type);
+
+ virtual void Initialize() OVERRIDE {
+ HGraph* graph = GetGraph();
+ int length = graph->GetBlocks().Size();
+ block_labels_ = graph->GetArena()->AllocArray<vixl::Label>(length);
+ for (int i = 0; i < length; ++i) {
+ new(block_labels_ + i) vixl::Label();
+ }
+ }
+
+ private:
+ // Labels for each block that will be compiled.
+ vixl::Label* block_labels_;
+
+ LocationsBuilderARM64 location_builder_;
+ InstructionCodeGeneratorARM64 instruction_visitor_;
+ Arm64Assembler assembler_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_ARM64_H_
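
The header above only declares InvokeDexCallingConventionVisitor::GetNextLocation(); the implementation lives in code_generator_arm64.cc. As a rough sketch of the register-then-stack walk such a visitor performs (all names below are hypothetical stand-ins, not ART APIs):

    #include <cstdint>

    struct ArgLocation {
      bool in_register;  // True: core register index; false: stack slot index.
      uint32_t index;
    };

    // x0 carries the ArtMethod*, so integer arguments start at x1 (seven
    // registers, matching kParameterCoreRegisters above); overflow arguments
    // spill to consecutive stack slots.
    ArgLocation NextIntArgLocation(uint32_t* gp_index, uint32_t* stack_index) {
      constexpr uint32_t kNumGpArgRegs = 7;
      if (*gp_index < kNumGpArgRegs) {
        return ArgLocation{true, (*gp_index)++};
      }
      return ArgLocation{false, (*stack_index)++};
    }
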
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 5f01265b85..61f0750c5c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1056,16 +1056,13 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
LocationSummary* locations = add->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
-
+ DCHECK(first.Equals(locations->Out()));
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(first.As<Register>(), locations->Out().As<Register>());
if (second.IsRegister()) {
__ addl(first.As<Register>(), second.As<Register>());
} else if (second.IsConstant()) {
- HConstant* instruction = second.GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ addl(first.As<Register>(), imm);
+ __ addl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else {
__ addl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
}
@@ -1073,10 +1070,6 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
}
case Primitive::kPrimLong: {
- DCHECK_EQ(first.AsRegisterPairLow<Register>(),
- locations->Out().AsRegisterPairLow<Register>());
- DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
- locations->Out().AsRegisterPairHigh<Register>());
if (second.IsRegister()) {
__ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
@@ -1122,16 +1115,16 @@ void LocationsBuilderX86::VisitSub(HSub* sub) {
locations->SetOut(Location::SameAsFirstInput());
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
break;
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1139,52 +1132,43 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(first.As<Register>(),
- locations->Out().As<Register>());
if (second.IsRegister()) {
- __ subl(first.As<Register>(),
- second.As<Register>());
+ __ subl(first.As<Register>(), second.As<Register>());
} else if (second.IsConstant()) {
- HConstant* instruction = second.GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(first.As<Register>(), imm);
+ __ subl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ subl(first.As<Register>(),
- Address(ESP, second.GetStackIndex()));
+ __ subl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- DCHECK_EQ(first.AsRegisterPairLow<Register>(),
- locations->Out().AsRegisterPairLow<Register>());
- DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
- locations->Out().AsRegisterPairHigh<Register>());
if (second.IsRegister()) {
- __ subl(first.AsRegisterPairLow<Register>(),
- second.AsRegisterPairLow<Register>());
- __ sbbl(first.AsRegisterPairHigh<Register>(),
- second.AsRegisterPairHigh<Register>());
+ __ subl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ sbbl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
- __ subl(first.AsRegisterPairLow<Register>(),
- Address(ESP, second.GetStackIndex()));
+ __ subl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
__ sbbl(first.AsRegisterPairHigh<Register>(),
Address(ESP, second.GetHighStackIndex(kX86WordSize)));
}
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ subss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ subsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1356,18 +1340,34 @@ void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
}
-void LocationsBuilderX86::VisitNot(HNot* instruction) {
+void LocationsBuilderX86::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-void InstructionCodeGeneratorX86::VisitNot(HNot* instruction) {
- LocationSummary* locations = instruction->GetLocations();
+void InstructionCodeGeneratorX86::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
+ DCHECK_EQ(locations->InAt(0).As<Register>(), locations->Out().As<Register>());
Location out = locations->Out();
DCHECK_EQ(locations->InAt(0).As<Register>(), out.As<Register>());
- __ xorl(out.As<Register>(), Immediate(1));
+ switch (not_->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ xorl(out.As<Register>(), Immediate(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ notl(out.As<Register>());
+ break;
+
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Not yet implemented type for not operation " << not_->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
}
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 38a40dc7de..4a05b89892 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1047,19 +1047,17 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) {
LocationSummary* locations = add->GetLocations();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
-
DCHECK(first.Equals(locations->Out()));
+
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
__ addl(first.As<CpuRegister>(), second.As<CpuRegister>());
} else if (second.IsConstant()) {
- HConstant* instruction = second.GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
__ addl(first.As<CpuRegister>(), imm);
} else {
- __ addl(first.As<CpuRegister>(),
- Address(CpuRegister(RSP), second.GetStackIndex()));
+ __ addl(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
@@ -1100,53 +1098,52 @@ void LocationsBuilderX86_64::VisitSub(HSub* sub) {
locations->SetOut(Location::SameAsFirstInput());
break;
}
-
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble: {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
break;
-
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
- DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
- locations->Out().As<CpuRegister>().AsRegister());
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- if (locations->InAt(1).IsRegister()) {
- __ subl(locations->InAt(0).As<CpuRegister>(),
- locations->InAt(1).As<CpuRegister>());
- } else if (locations->InAt(1).IsConstant()) {
- HConstant* instruction = locations->InAt(1).GetConstant();
- Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(locations->InAt(0).As<CpuRegister>(), imm);
+ if (second.IsRegister()) {
+ __ subl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (second.IsConstant()) {
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ __ subl(first.As<CpuRegister>(), imm);
} else {
- __ subl(locations->InAt(0).As<CpuRegister>(),
- Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
+ __ subl(first.As<CpuRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ subq(locations->InAt(0).As<CpuRegister>(),
- locations->InAt(1).As<CpuRegister>());
+ __ subq(first.As<CpuRegister>(), second.As<CpuRegister>());
break;
}
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
+ case Primitive::kPrimFloat: {
+ __ subss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
+ }
+
+ case Primitive::kPrimDouble: {
+ __ subsd(first.As<XmmRegister>(), second.As<XmmRegister>());
+ break;
+ }
default:
- LOG(FATAL) << "Unimplemented sub type " << sub->GetResultType();
+ LOG(FATAL) << "Unexpected sub type " << sub->GetResultType();
}
}
@@ -1276,18 +1273,34 @@ void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instru
// Nothing to do, the parameter is already at its location.
}
-void LocationsBuilderX86_64::VisitNot(HNot* instruction) {
+void LocationsBuilderX86_64::VisitNot(HNot* not_) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(not_, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
}
-void InstructionCodeGeneratorX86_64::VisitNot(HNot* instruction) {
- LocationSummary* locations = instruction->GetLocations();
+void InstructionCodeGeneratorX86_64::VisitNot(HNot* not_) {
+ LocationSummary* locations = not_->GetLocations();
DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
locations->Out().As<CpuRegister>().AsRegister());
- __ xorq(locations->Out().As<CpuRegister>(), Immediate(1));
+ Location out = locations->Out();
+ switch (not_->InputAt(0)->GetType()) {
+ case Primitive::kPrimBoolean:
+ __ xorq(out.As<CpuRegister>(), Immediate(1));
+ break;
+
+ case Primitive::kPrimInt:
+ __ notl(out.As<CpuRegister>());
+ break;
+
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Not yet implemented type for not operation " << not_->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unimplemented type for not operation " << not_->GetResultType();
+ }
}
void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index af4cf73867..03951e29dd 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -16,8 +16,10 @@
#include <functional>
+#include "base/macros.h"
#include "builder.h"
#include "code_generator_arm.h"
+#include "code_generator_arm64.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
#include "common_compiler_test.h"
@@ -93,6 +95,12 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, int32_t expected) {
if (kRuntimeISA == kX86_64) {
Run(allocator, codegenX86_64, has_result, expected);
}
+
+ arm64::CodeGeneratorARM64 codegenARM64(graph);
+ codegenARM64.CompileBaseline(&allocator, true);
+ if (kRuntimeISA == kArm64) {
+ Run(allocator, codegenARM64, has_result, expected);
+ }
}
static void RunCodeOptimized(CodeGenerator* codegen,
@@ -134,8 +142,8 @@ static void TestCode(const uint16_t* data, bool has_result = false, int32_t expe
HGraphBuilder builder(&arena);
const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
HGraph* graph = builder.BuildGraph(*item);
- // Remove suspend checks, they cannot be executed in this context.
ASSERT_NE(graph, nullptr);
+ // Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
RunCodeBaseline(graph, has_result, expected);
}
@@ -260,6 +268,31 @@ TEST(CodegenTest, ReturnIf2) {
TestCode(data, true, 0);
}
+// Exercise the bit-wise (one's complement) not-int instruction.
+#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
+TEST(CodegenTest, TEST_NAME) { \
+ const int32_t input = INPUT; \
+ const uint16_t input_lo = input & 0x0000FFFF; \
+ const uint16_t input_hi = input >> 16; \
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
+ Instruction::CONST | 0 << 8, input_lo, input_hi, \
+ Instruction::NOT_INT | 1 << 8 | 0 << 12, \
+ Instruction::RETURN | 1 << 8); \
+ \
+ TestCode(data, true, EXPECTED_OUTPUT); \
+}
+
+NOT_INT_TEST(ReturnNotIntMinus2, -2, 1)
+NOT_INT_TEST(ReturnNotIntMinus1, -1, 0)
+NOT_INT_TEST(ReturnNotInt0, 0, -1)
+NOT_INT_TEST(ReturnNotInt1, 1, -2)
+NOT_INT_TEST(ReturnNotIntINT_MIN, -2147483648, 2147483647) // (2^31) - 1
+NOT_INT_TEST(ReturnNotIntINT_MINPlus1, -2147483647, 2147483646) // (2^31) - 2
+NOT_INT_TEST(ReturnNotIntINT_MAXMinus1, 2147483646, -2147483647) // -(2^31) + 1
+NOT_INT_TEST(ReturnNotIntINT_MAX, 2147483647, -2147483648) // -(2^31)
+
+#undef NOT_INT_TEST
+
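
A quick worked example of the literal split in the macro above, for INPUT == -2 (plain arithmetic, not ART code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32_t input = -2;                      // 0xFFFFFFFE
      const uint16_t input_lo = input & 0x0000FFFF;  // 0xFFFE
      const uint16_t input_hi = input >> 16;         // 0xFFFF (arithmetic shift)
      // CONST materializes v0 = -2 from the two 16-bit code units, NOT_INT
      // writes ~v0 == 1 into v1, and RETURN v1 yields the expected output.
      std::printf("lo=0x%04X hi=0x%04X ~input=%d\n", input_lo, input_hi, ~input);
      return 0;
    }
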
TEST(CodegenTest, ReturnAdd1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
@@ -370,12 +403,16 @@ TEST(CodegenTest, NonMaterializedCondition) {
TestCode(data, true, 12); \
}
+#if !defined(__aarch64__)
MUL_TEST(INT, MulInt);
MUL_TEST(LONG, MulLong);
-// MUL_TEST(FLOAT, Float);
-// MUL_TEST(DOUBLE, Double);
+#endif
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnMulIntLit8) {
+#else
TEST(CodegenTest, ReturnMulIntLit8) {
+#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
@@ -384,7 +421,11 @@ TEST(CodegenTest, ReturnMulIntLit8) {
TestCode(data, true, 12);
}
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_ReturnMulIntLit16) {
+#else
TEST(CodegenTest, ReturnMulIntLit16) {
+#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
@@ -393,5 +434,121 @@ TEST(CodegenTest, ReturnMulIntLit16) {
TestCode(data, true, 12);
}
+TEST(CodegenTest, MaterializedCondition1) {
+ // Check that conditions are materialized correctly. A materialized condition
+ // should yield `1` if it evaluates to true, and `0` otherwise.
+ // We force the materialization of comparisons for different combinations of
+ // inputs and check the results.
+
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
+
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = new (&allocator) HGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+ HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(code_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ entry_block->AddSuccessor(code_block);
+ code_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
+
+ HIntConstant cst_lhs(lhs[i]);
+ code_block->AddInstruction(&cst_lhs);
+ HIntConstant cst_rhs(rhs[i]);
+ code_block->AddInstruction(&cst_rhs);
+ HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+ code_block->AddInstruction(&cmp_lt);
+ HReturn ret(&cmp_lt);
+ code_block->AddInstruction(&ret);
+
+ auto hook_before_codegen = [](HGraph* graph) {
+ HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+
+ RunCodeOptimized(graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
+}
+
+TEST(CodegenTest, MaterializedCondition2) {
+ // Check that HIf correctly interprets a materialized condition.
+ // We force the materialization of comparisons for different combinations of
+ // inputs. An HIf takes the materialized condition as input and returns a
+ // value that we verify.
+
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
+
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = new (&allocator) HGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_block);
+ HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_true_block);
+ HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_false_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddSuccessor(if_block);
+ if_block->AddSuccessor(if_true_block);
+ if_block->AddSuccessor(if_false_block);
+ if_true_block->AddSuccessor(exit_block);
+ if_false_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
+
+ HIntConstant cst_lhs(lhs[i]);
+ if_block->AddInstruction(&cst_lhs);
+ HIntConstant cst_rhs(rhs[i]);
+ if_block->AddInstruction(&cst_rhs);
+ HLessThan cmp_lt(&cst_lhs, &cst_rhs);
+ if_block->AddInstruction(&cmp_lt);
+ // We insert a temporary to separate the HIf from the HLessThan and force
+ // the materialization of the condition.
+ HTemporary force_materialization(0);
+ if_block->AddInstruction(&force_materialization);
+ HIf if_lt(&cmp_lt);
+ if_block->AddInstruction(&if_lt);
+
+ HIntConstant cst_lt(1);
+ if_true_block->AddInstruction(&cst_lt);
+ HReturn ret_lt(&cst_lt);
+ if_true_block->AddInstruction(&ret_lt);
+ HIntConstant cst_ge(0);
+ if_false_block->AddInstruction(&cst_ge);
+ HReturn ret_ge(&cst_ge);
+ if_false_block->AddInstruction(&ret_ge);
+
+ auto hook_before_codegen = [](HGraph* graph) {
+ HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+
+ RunCodeOptimized(graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
+}
} // namespace art
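
In plain C++ terms, the two tests above exercise the following two shapes (illustrative only; the tests build the HGraphs by hand rather than compiling source like this):

    // MaterializedCondition1: the comparison result itself is the return
    // value, so the backend must produce an actual 0/1 in a register.
    int ReturnCondition(int lhs, int rhs) {
      int cond = (lhs < rhs) ? 1 : 0;  // Materialized condition.
      return cond;
    }

    // MaterializedCondition2: an if consumes the already-materialized value
    // instead of fusing the compare into the branch.
    int BranchOnCondition(int lhs, int rhs) {
      int cond = (lhs < rhs) ? 1 : 0;
      if (cond != 0) {
        return 1;
      }
      return 0;
    }
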
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 05984a080e..4d8bec2422 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -17,6 +17,8 @@
#ifndef ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
#define ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
+#include <ostream>
+
#include "base/value_object.h"
namespace art {
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 2d9e35c3b6..29eabe7e29 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -50,7 +50,7 @@ void InstructionSimplifier::VisitEqual(HEqual* equal) {
// Replace (bool_value == 0) with !bool_value
DCHECK_EQ(input2->AsIntConstant()->GetValue(), 0);
equal->GetBlock()->ReplaceAndRemoveInstructionWith(
- equal, new (GetGraph()->GetArena()) HNot(input1));
+ equal, new (GetGraph()->GetArena()) HNot(Primitive::kPrimBoolean, input1));
}
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9b7ff88b68..7adb84008a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -485,7 +485,7 @@ class HBasicBlock : public ArenaObject {
M(Local, Instruction) \
M(LongConstant, Constant) \
M(NewInstance, Instruction) \
- M(Not, Instruction) \
+ M(Not, UnaryOperation) \
M(ParameterValue, Instruction) \
M(ParallelMove, Instruction) \
M(Phi, Instruction) \
@@ -1708,15 +1708,17 @@ class HParameterValue : public HExpression<0> {
DISALLOW_COPY_AND_ASSIGN(HParameterValue);
};
-class HNot : public HExpression<1> {
+class HNot : public HUnaryOperation {
public:
- explicit HNot(HInstruction* input) : HExpression(Primitive::kPrimBoolean, SideEffects::None()) {
- SetRawInputAt(0, input);
- }
+ explicit HNot(Primitive::Type result_type, HInstruction* input)
+ : HUnaryOperation(result_type, input) {}
virtual bool CanBeMoved() const { return true; }
virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
+ virtual int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
+
DECLARE_INSTRUCTION(Not);
private:
@@ -2123,7 +2125,7 @@ class HGraphVisitor : public ValueObject {
#undef DECLARE_VISIT_INSTRUCTION
private:
- HGraph* graph_;
+ HGraph* const graph_;
DISALLOW_COPY_AND_ASSIGN(HGraphVisitor);
};
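
Turning HNot into an HUnaryOperation with Evaluate() overloads is what lets a generic constant-folding pass fold it; one's-complement identities like these are all it relies on (a self-contained check, not ART code):

    #include <cstdint>

    static_assert(~5 == -6, "one's complement of a positive int");
    static_assert(~0 == -1, "one's complement of zero");
    static_assert(~static_cast<int64_t>(0) == static_cast<int64_t>(-1),
                  "and likewise for the 64-bit overload");
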
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index dce8e6d78b..80e9cdb16f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -215,7 +215,10 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
}
// Do not attempt to compile on architectures we do not support.
- if (instruction_set != kX86 && instruction_set != kX86_64 && instruction_set != kThumb2) {
+ if (instruction_set != kArm64 &&
+ instruction_set != kThumb2 &&
+ instruction_set != kX86 &&
+ instruction_set != kX86_64) {
return nullptr;
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 8f718480b3..7dda4f61d5 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -32,6 +32,7 @@ class BlockInfo : public ArenaObject {
live_in_(allocator, number_of_ssa_values, false),
live_out_(allocator, number_of_ssa_values, false),
kill_(allocator, number_of_ssa_values, false) {
+ UNUSED(block_);
live_in_.ClearAllBits();
live_out_.ClearAllBits();
kill_.ClearAllBits();
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index c144991cfc..1b1d121725 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -219,12 +219,13 @@ class Arm64Assembler FINAL : public Assembler {
void AddConstant(XRegister rd, int32_t value, vixl::Condition cond = vixl::al);
void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al);
- // Vixl assembler.
- vixl::MacroAssembler* const vixl_masm_;
-
// List of exception blocks to generate at the end of the code cache.
std::vector<Arm64Exception*> exception_blocks_;
+ public:
+ // Vixl assembler.
+ vixl::MacroAssembler* const vixl_masm_;
+
// Used for testing.
friend class Arm64ManagedRegister_VixlRegisters_Test;
};
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 91b8d8ab9a..2b0c94c9e0 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -118,6 +118,7 @@ class Label {
friend class arm::ArmAssembler;
friend class arm::Arm32Assembler;
friend class arm::Thumb2Assembler;
+ friend class arm64::Arm64Assembler;
friend class mips::MipsAssembler;
friend class x86::X86Assembler;
friend class x86_64::X86_64Assembler;
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 5bfa462d79..91237ae910 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -24,7 +24,6 @@
#include <cstdio>
#include <cstdlib>
#include <fstream>
-#include <iostream>
#include <iterator>
#include <sys/stat.h>
@@ -118,9 +117,8 @@ class AssemblerTest : public testing::Test {
std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
for (auto reg : registers) {
for (int64_t imm : imms) {
- Imm* new_imm = CreateImmediate(imm);
- (assembler_.get()->*f)(*reg, *new_imm);
- delete new_imm;
+ Imm new_imm = CreateImmediate(imm);
+ (assembler_.get()->*f)(*reg, new_imm);
std::string base = fmt;
size_t reg_index = base.find("{reg}");
@@ -154,9 +152,8 @@ class AssemblerTest : public testing::Test {
std::string str;
std::vector<int64_t> imms = CreateImmediateValues(imm_bytes);
for (int64_t imm : imms) {
- Imm* new_imm = CreateImmediate(imm);
- (assembler_.get()->*f)(*new_imm);
- delete new_imm;
+ Imm new_imm = CreateImmediate(imm);
+ (assembler_.get()->*f)(new_imm);
std::string base = fmt;
size_t imm_index = base.find("{imm}");
@@ -333,7 +330,7 @@ class AssemblerTest : public testing::Test {
}
// Create an immediate from the specific value.
- virtual Imm* CreateImmediate(int64_t imm_value) = 0;
+ virtual Imm CreateImmediate(int64_t imm_value) = 0;
private:
// Driver() assembles and compares the results. If the results are not equal and we have a
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index c7eada34f5..b5bf31bbd6 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -29,7 +29,7 @@
namespace art {
namespace x86 {
-class Immediate {
+class Immediate : public ValueObject {
public:
explicit Immediate(int32_t value) : value_(value) {}
@@ -47,7 +47,7 @@ class Immediate {
};
-class Operand {
+class Operand : public ValueObject {
public:
uint8_t mod() const {
return (encoding_at(0) >> 6) & 3;
@@ -129,8 +129,6 @@ class Operand {
}
friend class X86Assembler;
-
- DISALLOW_COPY_AND_ASSIGN(Operand);
};
@@ -168,7 +166,6 @@ class Address : public Operand {
}
}
-
Address(Register index, ScaleFactor scale, int32_t disp) {
CHECK_NE(index, ESP); // Illegal addressing mode.
SetModRM(0, ESP);
@@ -205,14 +202,12 @@ class Address : public Operand {
private:
Address() {}
-
- DISALLOW_COPY_AND_ASSIGN(Address);
};
class X86Assembler FINAL : public Assembler {
public:
- explicit X86Assembler() {}
+ explicit X86Assembler() : cfi_cfa_offset_(0), cfi_pc_(0) {}
virtual ~X86Assembler() {}
/*
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 7e5859cc45..92b81ec2e7 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -36,7 +36,7 @@ namespace x86_64 {
//
// Note: As we support cross-compilation, the value type must be int64_t. Please be aware of
// conversion rules in expressions regarding negation, especially size_t on 32b.
-class Immediate {
+class Immediate : public ValueObject {
public:
explicit Immediate(int64_t value) : value_(value) {}
@@ -54,12 +54,10 @@ class Immediate {
private:
const int64_t value_;
-
- DISALLOW_COPY_AND_ASSIGN(Immediate);
};
-class Operand {
+class Operand : public ValueObject {
public:
uint8_t mod() const {
return (encoding_at(0) >> 6) & 3;
@@ -157,8 +155,6 @@ class Operand {
}
friend class X86_64Assembler;
-
- DISALLOW_COPY_AND_ASSIGN(Operand);
};
@@ -247,8 +243,6 @@ class Address : public Operand {
private:
Address() {}
-
- DISALLOW_COPY_AND_ASSIGN(Address);
};
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 37a09328fa..18c5cbcbc6 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -72,8 +72,8 @@ class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler, x86_64
return registers_;
}
- x86_64::Immediate* CreateImmediate(int64_t imm_value) OVERRIDE {
- return new x86_64::Immediate(imm_value);
+ x86_64::Immediate CreateImmediate(int64_t imm_value) OVERRIDE {
+ return x86_64::Immediate(imm_value);
}
private:
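
The CreateImmediate() change follows from Immediate becoming a copyable ValueObject: the test hook can return by value instead of handing out a heap pointer the caller must delete. A minimal sketch of the difference (Immediate and Use() here are hypothetical stand-ins):

    #include <cstdint>

    class Immediate {  // Stand-in for x86_64::Immediate: small, trivially copyable.
     public:
      explicit Immediate(int64_t value) : value_(value) {}
      int64_t value() const { return value_; }
     private:
      int64_t value_;
    };

    Immediate CreateImmediate(int64_t v) { return Immediate(v); }

    void Use(const Immediate& imm) { (void)imm; }

    void Demo() {
      Use(CreateImmediate(42));  // No new/delete pair, nothing to leak.
    }
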
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 977774043c..98712cd9cc 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -30,6 +30,9 @@
#include <sys/utsname.h>
#endif
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "base/dumpable.h"
#include "base/stl_util.h"
#include "base/stringpiece.h"
@@ -1238,8 +1241,7 @@ static int dex2oat(int argc, char** argv) {
#ifdef ART_SEA_IR_MODE
true,
#endif
- verbose_methods.empty() ? nullptr : &verbose_methods
- )); // NOLINT(whitespace/parens)
+ verbose_methods.empty() ? nullptr : &verbose_methods));
// Done with usage checks, enable watchdog if requested
WatchDog watch_dog(watch_dog_enabled);
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index c97bf64c5d..bf68204d75 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -16,7 +16,7 @@
#include "disassembler.h"
-#include <iostream>
+#include <ostream>
#include "base/logging.h"
#include "base/stringprintf.h"
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 9caae9c48b..ee652b34f7 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -18,7 +18,7 @@
#include <inttypes.h>
-#include <iostream>
+#include <ostream>
#include <sstream>
#include "base/logging.h"
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index fc1065aff9..229ac9755f 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -18,7 +18,7 @@
#include <inttypes.h>
-#include <iostream>
+#include <ostream>
#include "base/logging.h"
#include "base/stringprintf.h"
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index c06492a3fe..97c06f178d 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -16,7 +16,7 @@
#include "disassembler_mips.h"
-#include <iostream>
+#include <ostream>
#include <sstream>
#include "base/logging.h"
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index ce14520225..e12559f010 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -16,13 +16,14 @@
#include "disassembler_x86.h"
-#include <iostream>
+#include <inttypes.h>
+
+#include <ostream>
#include <sstream>
#include "base/logging.h"
#include "base/stringprintf.h"
#include "thread.h"
-#include <inttypes.h>
namespace art {
namespace x86 {
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index 7b6aac9d6c..d9a433bbce 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -34,7 +34,7 @@ class Arm64Context : public Context {
void Reset() OVERRIDE;
- void FillCalleeSaves(const StackVisitor& fr) OVERRIDE;
+ void FillCalleeSaves(const StackVisitor& fr) OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetSP(uintptr_t new_sp) OVERRIDE {
bool success = SetGPR(SP, new_sp);
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 0fb96d7a9d..147d4348be 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1285,7 +1285,7 @@ TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_I
THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
-THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
// This is separated out as the argument order is different.
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index ea586b8543..c5a0f6c231 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1148,7 +1148,7 @@ TEST_F(StubTest, AllocObjectArray) {
// For some reason this does not work, as the type_idx is artificial and outside what the
// resolved types of c_obj allow...
- if (false) {
+ if ((false)) {
// Use an arbitrary method from c to use as referrer
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)), // arbitrary
diff --git a/runtime/base/casts.h b/runtime/base/casts.h
index be94c2eb78..138c2fda80 100644
--- a/runtime/base/casts.h
+++ b/runtime/base/casts.h
@@ -19,6 +19,8 @@
#include <assert.h>
#include <string.h>
+#include <type_traits>
+
#include "base/macros.h"
namespace art {
@@ -65,16 +67,9 @@ inline To implicit_cast(From const &f) {
template<typename To, typename From> // use like this: down_cast<T*>(foo);
inline To down_cast(From* f) { // so we only accept pointers
- // Ensures that To is a sub-type of From *. This test is here only
- // for compile-time type checking, and has no overhead in an
- // optimized build at run-time, as it will be optimized away
- // completely.
- if (false) {
- implicit_cast<From*, To>(0);
- }
+ static_assert(std::is_base_of<From, typename std::remove_pointer<To>::type>::value,
+ "down_cast unsafe as To is not a subtype of From");
- //
- // assert(f == NULL || dynamic_cast<To>(f) != NULL); // RTTI: debug mode only!
return static_cast<To>(f);
}
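
The static_assert replaces the old dead-code implicit_cast trick with a direct compile-time subtype check. A self-contained copy of the new shape, showing what now fails to compile:

    #include <type_traits>

    template <typename To, typename From>
    inline To down_cast(From* f) {
      static_assert(std::is_base_of<From, typename std::remove_pointer<To>::type>::value,
                    "down_cast unsafe as To is not a subtype of From");
      return static_cast<To>(f);
    }

    struct Base { virtual ~Base() {} };
    struct Derived : Base {};

    Derived* Demo(Base* b) {
      return down_cast<Derived*>(b);  // OK: Derived derives from Base.
      // down_cast<int*>(b);          // Now a compile error, not a silent no-op.
    }
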
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 5e8e994020..baa83e35af 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -17,8 +17,8 @@
#ifndef ART_RUNTIME_BASE_LOGGING_H_
#define ART_RUNTIME_BASE_LOGGING_H_
-#include <iostream>
#include <memory>
+#include <ostream>
#include "base/macros.h"
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index c80d35e42b..febea61b8d 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -68,22 +68,28 @@ struct CompileAssert {
#define ART_FRIEND_TEST(test_set_name, individual_test)\
friend class test_set_name##_##individual_test##_Test
-// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions.
-// It goes in the private: declarations in a class.
+// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private:
+// declarations in a class.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
+ TypeName(const TypeName&) = delete; \
+ void operator=(const TypeName&) = delete
-// A macro to disallow all the implicit constructors, namely the
-// default constructor, copy constructor and operator= functions.
+// A macro to disallow all the implicit constructors, namely the default constructor, copy
+// constructor and operator= functions.
//
-// This should be used in the private: declarations for a class
-// that wants to prevent anyone from instantiating it. This is
-// especially useful for classes containing only static methods.
+// This should be used in the private: declarations for a class that wants to prevent anyone from
+// instantiating it. This is especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName(); \
+ TypeName() = delete; \
DISALLOW_COPY_AND_ASSIGN(TypeName)
+// A macro to disallow new and delete operators for a class. It goes in the private: declarations.
+#define DISALLOW_ALLOCATION() \
+ public: \
+ ALWAYS_INLINE void operator delete(void*, size_t) { UNREACHABLE(); } \
+ private: \
+ void* operator new(size_t) = delete
+
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
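
Roughly how the new DISALLOW_ALLOCATION macro behaves at a use site (ALWAYS_INLINE and UNREACHABLE() replaced with portable stand-ins for this sketch):

    #include <cstddef>

    class StackOnly {
     public:
      void operator delete(void*, std::size_t) { __builtin_trap(); }  // UNREACHABLE() stand-in.
     private:
      void* operator new(std::size_t) = delete;
    };

    void Demo() {
      StackOnly ok;                       // Stack allocation is unaffected.
      // StackOnly* bad = new StackOnly;  // Error: operator new is deleted.
      (void)ok;
    }
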
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index f70db35f1c..e066787bfd 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -21,11 +21,8 @@
#include "mutex.h"
-#define ATRACE_TAG ATRACE_TAG_DALVIK
-
-#include "cutils/trace.h"
-
#include "base/stringprintf.h"
+#include "base/value_object.h"
#include "runtime.h"
#include "thread.h"
@@ -44,35 +41,6 @@ static inline int futex(volatile int *uaddr, int op, int val, const struct times
}
#endif // ART_USE_FUTEXES
-class ScopedContentionRecorder {
- public:
- ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
- : mutex_(kLogLockContentions ? mutex : NULL),
- blocked_tid_(kLogLockContentions ? blocked_tid : 0),
- owner_tid_(kLogLockContentions ? owner_tid : 0),
- start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
- if (ATRACE_ENABLED()) {
- std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
- mutex->GetName(), owner_tid);
- ATRACE_BEGIN(msg.c_str());
- }
- }
-
- ~ScopedContentionRecorder() {
- ATRACE_END();
- if (kLogLockContentions) {
- uint64_t end_nano_time = NanoTime();
- mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
- }
- }
-
- private:
- BaseMutex* const mutex_;
- const uint64_t blocked_tid_;
- const uint64_t owner_tid_;
- const uint64_t start_nano_time_;
-};
-
static inline uint64_t SafeGetTid(const Thread* self) {
if (self != NULL) {
return static_cast<uint64_t>(self->GetTid());
@@ -158,15 +126,7 @@ inline void ReaderWriterMutex::SharedLock(Thread* self) {
// Add as an extra reader.
done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
} else {
- // Owner holds it exclusively, hang up.
- ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
- ++num_pending_readers_;
- if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
- if (errno != EAGAIN) {
- PLOG(FATAL) << "futex wait failed for " << name_;
- }
- }
- --num_pending_readers_;
+ HandleSharedLockContention(self, cur_state);
}
} while (!done);
#else
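
The SharedLock() change keeps only the compare-and-swap fast path inline and pushes the futex wait behind an out-of-line call, shrinking every inlined call site. The shape of the split, in a simplified sketch (std::atomic standing in for ART's AtomicInteger):

    #include <atomic>
    #include <cstdint>

    class RWLockSketch {
     public:
      void SharedLock() {
        while (true) {
          int32_t cur = state_.load(std::memory_order_relaxed);
          if (cur >= 0 &&
              state_.compare_exchange_weak(cur, cur + 1, std::memory_order_acquire)) {
            return;  // Fast path: registered as one more reader.
          }
          if (cur < 0) {
            HandleSharedLockContention(cur);  // Out-of-line slow path.
          }
        }
      }

     private:
      void HandleSharedLockContention(int32_t /*cur_state*/) { /* futex wait elided */ }
      std::atomic<int32_t> state_{0};  // -1: held exclusively; >0: reader count.
    };
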
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 1d373497eb..423ea77da9 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -19,8 +19,12 @@
#include <errno.h>
#include <sys/time.h>
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "atomic.h"
#include "base/logging.h"
+#include "base/value_object.h"
#include "mutex-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
@@ -106,6 +110,36 @@ class ScopedAllMutexesLock FINAL {
const BaseMutex* const mutex_;
};
+// Scoped class that generates events at the beginning and end of lock contention.
+class ScopedContentionRecorder FINAL : public ValueObject {
+ public:
+ ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
+ : mutex_(kLogLockContentions ? mutex : NULL),
+ blocked_tid_(kLogLockContentions ? blocked_tid : 0),
+ owner_tid_(kLogLockContentions ? owner_tid : 0),
+ start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
+ if (ATRACE_ENABLED()) {
+ std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
+ mutex->GetName(), owner_tid);
+ ATRACE_BEGIN(msg.c_str());
+ }
+ }
+
+ ~ScopedContentionRecorder() {
+ ATRACE_END();
+ if (kLogLockContentions) {
+ uint64_t end_nano_time = NanoTime();
+ mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
+ }
+ }
+
+ private:
+ BaseMutex* const mutex_;
+ const uint64_t blocked_tid_;
+ const uint64_t owner_tid_;
+ const uint64_t start_nano_time_;
+};
+
BaseMutex::BaseMutex(const char* name, LockLevel level) : level_(level), name_(name) {
if (kLogLockContentions) {
ScopedAllMutexesLock mu(this);
@@ -612,6 +646,20 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
}
#endif
+#if ART_USE_FUTEXES
+void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
+ // Owner holds it exclusively, hang up.
+ ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
+ ++num_pending_readers_;
+ if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
+ if (errno != EAGAIN) {
+ PLOG(FATAL) << "futex wait failed for " << name_;
+ }
+ }
+ --num_pending_readers_;
+}
+#endif
+
bool ReaderWriterMutex::SharedTryLock(Thread* self) {
DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
@@ -680,7 +728,7 @@ ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs));
#if !defined(__APPLE__)
// Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock.
- CHECK_MUTEX_CALL(pthread_condattr_setclock(&cond_attrs, CLOCK_MONOTONIC));
+ CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
#endif
CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs));
#endif
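
The pthread_condattr_setclock fix deserves a note: CHECK_MUTEX_CALL takes the function and its argument list as two separate macro parameters, so the old single-argument spelling could not expand correctly. A hypothetical macro of that shape (the real definition differs):

    #include <cstdio>
    #include <cstdlib>

    #define CHECK_MUTEX_CALL(call, args)                       \
      do {                                                     \
        int rc = call args;                                    \
        if (rc != 0) {                                         \
          std::fprintf(stderr, "%s failed: %d\n", #call, rc);  \
          std::abort();                                        \
        }                                                      \
      } while (false)

    // Usage mirrors the fixed line above:
    //   CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC));
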
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 516fa07b66..628231a273 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -361,6 +361,9 @@ class LOCKABLE ReaderWriterMutex : public BaseMutex {
private:
#if ART_USE_FUTEXES
+ // Out-of-line path for handling contention for a SharedLock.
+ void HandleSharedLockContention(Thread* self, int32_t cur_state);
+
// -1 implies held exclusive, +ve shared held by state_ many owners.
AtomicInteger state_;
// Exclusive owner. Modification guarded by this mutex.
diff --git a/runtime/base/stringpiece.cc b/runtime/base/stringpiece.cc
index 824ee4863d..2570bad85d 100644
--- a/runtime/base/stringpiece.cc
+++ b/runtime/base/stringpiece.cc
@@ -16,7 +16,7 @@
#include "stringpiece.h"
-#include <iostream>
+#include <ostream>
#include <utility>
#include "logging.h"
diff --git a/runtime/base/value_object.h b/runtime/base/value_object.h
index ee0e2a0dcc..8c752a923d 100644
--- a/runtime/base/value_object.h
+++ b/runtime/base/value_object.h
@@ -17,19 +17,13 @@
#ifndef ART_RUNTIME_BASE_VALUE_OBJECT_H_
#define ART_RUNTIME_BASE_VALUE_OBJECT_H_
-#include "base/logging.h"
+#include "base/macros.h"
namespace art {
class ValueObject {
- public:
- void* operator new(size_t size) {
- LOG(FATAL) << "UNREACHABLE";
- abort();
- }
- void operator delete(void*, size_t) {
- LOG(FATAL) << "UNREACHABLE";
- }
+ private:
+ DISALLOW_ALLOCATION();
};
} // namespace art
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 8c2293f869..9d2d59c9a4 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -37,7 +37,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
}
- if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
+ if (m == nullptr || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
return true;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index bbbb9e0b81..f6717fb006 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -17,6 +17,7 @@
#include "class_linker.h"
#include <deque>
+#include <iostream>
#include <memory>
#include <queue>
#include <string>
@@ -5110,7 +5111,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_
bool seen_non_ref = false;
for (size_t i = 0; i < num_fields; i++) {
mirror::ArtField* field = fields->Get(i);
- if (false) { // enable to debug field layout
+ if ((false)) { // enable to debug field layout
LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
<< " class=" << PrettyClass(klass.Get())
<< " field=" << PrettyField(field)
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b676c62ee1..18bbc3813b 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4116,7 +4116,6 @@ class HeapChunkContext {
HeapChunkContext(bool merge, bool native)
: buf_(16384 - 16),
type_(0),
- merge_(merge),
chunk_overhead_(0) {
Reset();
if (native) {
@@ -4327,7 +4326,6 @@ class HeapChunkContext {
void* startOfNextMemoryChunk_;
size_t totalAllocationUnits_;
uint32_t type_;
- bool merge_;
bool needHeader_;
size_t chunk_overhead_;
@@ -4678,7 +4676,7 @@ static const char* GetMethodSourceFile(mirror::ArtMethod* method)
* between the contents of these tables.
*/
jbyteArray Dbg::GetRecentAllocations() {
- if (false) {
+ if ((false)) {
DumpRecentAllocations();
}
diff --git a/runtime/dex_instruction_visitor_test.cc b/runtime/dex_instruction_visitor_test.cc
index c5e63eb06e..5273084a9a 100644
--- a/runtime/dex_instruction_visitor_test.cc
+++ b/runtime/dex_instruction_visitor_test.cc
@@ -16,7 +16,6 @@
#include "dex_instruction_visitor.h"
-#include <iostream>
#include <memory>
#include "gtest/gtest.h"
diff --git a/runtime/dex_method_iterator_test.cc b/runtime/dex_method_iterator_test.cc
index b2b7138080..c6f333f675 100644
--- a/runtime/dex_method_iterator_test.cc
+++ b/runtime/dex_method_iterator_test.cc
@@ -38,7 +38,7 @@ TEST_F(DexMethodIteratorTest, Basic) {
const DexFile& dex_file = it.GetDexFile();
InvokeType invoke_type = it.GetInvokeType();
uint32_t method_idx = it.GetMemberIndex();
- if (false) {
+ if ((false)) {
LOG(INFO) << invoke_type << " " << PrettyMethod(method_idx, dex_file);
}
it.Next();
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 804c98a99f..fa531a7c60 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1144,7 +1144,7 @@ static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;
size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
size_t freed_bytes = 0;
- if (false) {
+ if ((false)) {
// Used only to test Free() as GC uses only BulkFree().
for (size_t i = 0; i < num_ptrs; ++i) {
freed_bytes += FreeInternal(self, ptrs[i]);
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 4148e9c0b9..9e6a8003f4 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -18,6 +18,9 @@
#include "garbage_collector.h"
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/logging.h"
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 942b556a7e..83da0639ea 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -21,6 +21,9 @@
#include <climits>
#include <vector>
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "base/bounded_fifo.h"
#include "base/logging.h"
#include "base/macros.h"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 2892857dda..bceac4403d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2138,6 +2138,13 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
} else {
LOG(FATAL) << "Invalid current allocator " << current_allocator_;
}
+ if (IsGcConcurrent()) {
+ // Disable concurrent GC check so that we don't have spammy JNI requests.
+ // This gets recalculated in GrowForUtilization. It is important that it is disabled /
+ // calculated in the same thread so that there aren't any races that can cause it to become
+ // permanently disabled. b/17942071
+ concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+ }
CHECK(collector != nullptr)
<< "Could not find garbage collector with collector_type="
<< static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
@@ -2956,9 +2963,6 @@ void Heap::RequestConcurrentGC(Thread* self) {
self->IsHandlingStackOverflow()) {
return;
}
- // We already have a request pending, no reason to start more until we update
- // concurrent_start_bytes_.
- concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
JNIEnv* env = self->GetJniEnv();
DCHECK(WellKnownClasses::java_lang_Daemons != nullptr);
DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != nullptr);
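
The heap change is subtle enough to spell out: previously the requesting mutator thread disabled the concurrent-GC trigger in RequestConcurrentGC() while the GC thread recomputed it in GrowForUtilization(), and an unlucky interleaving could leave the max() sentinel in place forever. Performing both writes on the GC thread orders them by program order. Schematically (names and values simplified):

    #include <atomic>
    #include <cstddef>
    #include <limits>

    std::atomic<std::size_t> concurrent_start_bytes{16 * 1024 * 1024};

    void GcThreadCollect() {
      // Disable further trigger checks for the duration of this collection...
      concurrent_start_bytes.store(std::numeric_limits<std::size_t>::max());
      // ... run the collection ...
      // ...then GrowForUtilization() re-enables the trigger with a fresh target.
      concurrent_start_bytes.store(/*recomputed target=*/24 * 1024 * 1024);
    }
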
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index ba85c55c96..ff1e38b91a 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -601,9 +601,6 @@ class Heap {
void RemoveRememberedSet(space::Space* space);
bool IsCompilingBoot() const;
- bool RunningOnValgrind() const {
- return running_on_valgrind_;
- }
bool HasImageSpace() const;
ReferenceProcessor* GetReferenceProcessor() {
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 7230116106..cfde460034 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -19,7 +19,7 @@
#include "space.h"
-#include <iostream>
+#include <ostream>
#include <valgrind.h>
#include <memcheck/memcheck.h>
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index d25694ad24..161eba9c1d 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -17,6 +17,9 @@
#include "rosalloc_space-inl.h"
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
@@ -73,8 +76,9 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
uint8_t* begin = mem_map->Begin();
// TODO: Fix RosAllocSpace to support valgrind. There are currently some issues with
// AllocationSize caused by redzones. b/12944686
- if (false && Runtime::Current()->GetHeap()->RunningOnValgrind()) {
- LOG(FATAL) << "Unimplemented";
+ if (Runtime::Current()->RunningOnValgrind()) {
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
} else {
return new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit,
can_move_objects, starting_size, initial_size, low_memory_mode);
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index c1455fd0bc..4d177a32d8 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -126,7 +126,7 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
}
table_[index].Add(obj);
result = ToIndirectRef(index);
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ added at " << ExtractIndex(result) << " top=" << segment_state_.parts.topIndex
<< " holes=" << segment_state_.parts.numHoles;
}
@@ -193,7 +193,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
if (numHoles != 0) {
while (--topIndex > bottomIndex && numHoles != 0) {
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ checking for hole at " << topIndex - 1
<< " (cookie=" << cookie << ") val="
<< table_[topIndex - 1].GetReference()->Read<kWithoutReadBarrier>();
@@ -201,7 +201,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
if (!table_[topIndex - 1].GetReference()->IsNull()) {
break;
}
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ ate hole at " << (topIndex - 1);
}
numHoles--;
@@ -210,7 +210,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
segment_state_.parts.topIndex = topIndex;
} else {
segment_state_.parts.topIndex = topIndex-1;
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ ate last entry " << topIndex - 1;
}
}
@@ -228,7 +228,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
*table_[idx].GetReference() = GcRoot<mirror::Object>(nullptr);
segment_state_.parts.numHoles++;
- if (false) {
+ if ((false)) {
LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
}
}
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index f8858d09fe..fa03fc7fa9 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -21,6 +21,7 @@
#include <math.h>
+#include <iostream>
#include <sstream>
#include "base/logging.h"
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 1bbcf8ef1c..b2b24209c7 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -478,6 +478,7 @@ class MANAGED LOCKABLE Object {
friend struct art::ObjectOffsets; // for verifying offset information
friend class CopyObjectVisitor; // for CopyObject().
friend class CopyClassVisitor; // for CopyObject().
+ DISALLOW_ALLOCATION();
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
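mirror::Object instances live only in the managed heap, so ordinary C++ heap allocation of them should never compile. The actual DISALLOW_ALLOCATION macro is defined in runtime/base/macros.h (also touched by this change); a hypothetical sketch of its general shape:

    #include <cstddef>

    // Hypothetical DISALLOW_ALLOCATION-style macro: deleting operator new turns
    // any `new Managed()` into a compile-time error.
    #define DISALLOW_ALLOCATION_SKETCH() \
     private:                            \
      void* operator new(std::size_t) = delete

    class Managed {
     public:
      Managed() {}
      DISALLOW_ALLOCATION_SKETCH();
    };
    // Managed* m = new Managed();  // error: attempt to use a deleted function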
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index bc191b4289..b0d8e87a90 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -107,10 +107,11 @@ static android::NativeBridgeRuntimeCallbacks native_bridge_art_callbacks_ {
GetMethodShorty, GetNativeMethodCount, GetNativeMethods
};
-void LoadNativeBridge(std::string& native_bridge_library_filename) {
- android::LoadNativeBridge(native_bridge_library_filename.c_str(), &native_bridge_art_callbacks_);
+bool LoadNativeBridge(std::string& native_bridge_library_filename) {
VLOG(startup) << "Runtime::Setup native bridge library: "
<< (native_bridge_library_filename.empty() ? "(empty)" : native_bridge_library_filename);
+ return android::LoadNativeBridge(native_bridge_library_filename.c_str(),
+ &native_bridge_art_callbacks_);
}
void PreInitializeNativeBridge(std::string dir) {
diff --git a/runtime/native_bridge_art_interface.h b/runtime/native_bridge_art_interface.h
index 026cd82c15..090cddb9b6 100644
--- a/runtime/native_bridge_art_interface.h
+++ b/runtime/native_bridge_art_interface.h
@@ -26,7 +26,7 @@ namespace art {
// Mirrors the libnativebridge interface. This keeps the ART callbacks out of line and avoids
// requiring the system/core header file in other files.
-void LoadNativeBridge(std::string& native_bridge_library_filename);
+bool LoadNativeBridge(std::string& native_bridge_library_filename);
// This is mostly for testing purposes, as in a full system this is called by Zygote code.
void PreInitializeNativeBridge(std::string dir);
diff --git a/runtime/offsets.cc b/runtime/offsets.cc
index 369140176e..f59ed881af 100644
--- a/runtime/offsets.cc
+++ b/runtime/offsets.cc
@@ -16,7 +16,7 @@
#include "offsets.h"
-#include <iostream> // NOLINT
+#include <ostream>
namespace art {
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 72a6b0f31f..9d5063f3ee 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -17,7 +17,8 @@
#ifndef ART_RUNTIME_OFFSETS_H_
#define ART_RUNTIME_OFFSETS_H_
-#include <iostream> // NOLINT
+#include <ostream>
+
#include "globals.h"
namespace art {
diff --git a/runtime/profiler.cc b/runtime/profiler.cc
index 1d06d352a9..e399195008 100644
--- a/runtime/profiler.cc
+++ b/runtime/profiler.cc
@@ -97,7 +97,7 @@ static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mu
switch (profile_options.GetProfileType()) {
case kProfilerMethod: {
mirror::ArtMethod* method = thread->GetCurrentMethod(nullptr);
- if (false && method == nullptr) {
+ if ((false) && method == nullptr) {
LOG(INFO) << "No current method available";
std::ostringstream os;
thread->Dump(os);
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8e578374c0..c58735a94e 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -300,7 +300,7 @@ class InstrumentationStackVisitor : public StackVisitor {
InstrumentationStackVisitor(Thread* self, bool is_deoptimization, size_t frame_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(self, nullptr),
- self_(self), frame_depth_(frame_depth),
+ frame_depth_(frame_depth),
instrumentation_frames_to_pop_(0) {
CHECK_NE(frame_depth_, kInvalidFrameDepth);
}
@@ -324,7 +324,6 @@ class InstrumentationStackVisitor : public StackVisitor {
}
private:
- Thread* const self_;
const size_t frame_depth_;
size_t instrumentation_frames_to_pop_;
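Dropping self_ silences Clang's -Wunused-private-field, which flags private members that are never read; the visitor only ever needed the frame depth. A minimal reproduction with hypothetical names:

    class VisitorSketch {
     public:
      explicit VisitorSketch(int* self, int depth) : self_(self), depth_(depth) {}
      int Depth() const { return depth_; }
     private:
      int* const self_;  // clang: private field 'self_' is not used
      const int depth_;
    };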
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index da513080d5..8ba098f5d9 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -147,11 +147,15 @@ Runtime::Runtime()
target_sdk_version_(0),
implicit_null_checks_(false),
implicit_so_checks_(false),
- implicit_suspend_checks_(false) {
+ implicit_suspend_checks_(false),
+ is_native_bridge_loaded_(false) {
CheckAsmSupportOffsetsAndSizes();
}
Runtime::~Runtime() {
+ if (is_native_bridge_loaded_) {
+ UnloadNativeBridge();
+ }
if (dump_gc_performance_on_shutdown_) {
// This can't be called from the Heap destructor below because it
// could call RosAlloc::InspectAll() which needs the thread_list
@@ -431,12 +435,11 @@ bool Runtime::Start() {
return false;
}
} else {
- bool have_native_bridge = !native_bridge_library_filename_.empty();
- if (have_native_bridge) {
+ if (is_native_bridge_loaded_) {
PreInitializeNativeBridge(".");
}
- DidForkFromZygote(self->GetJniEnv(), have_native_bridge ? NativeBridgeAction::kInitialize :
- NativeBridgeAction::kUnload, GetInstructionSetString(kRuntimeISA));
+ DidForkFromZygote(self->GetJniEnv(), NativeBridgeAction::kInitialize,
+ GetInstructionSetString(kRuntimeISA));
}
StartDaemonThreads();
@@ -515,14 +518,17 @@ bool Runtime::InitZygote() {
void Runtime::DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa) {
is_zygote_ = false;
- switch (action) {
- case NativeBridgeAction::kUnload:
- UnloadNativeBridge();
- break;
+ if (is_native_bridge_loaded_) {
+ switch (action) {
+ case NativeBridgeAction::kUnload:
+ UnloadNativeBridge();
+ is_native_bridge_loaded_ = false;
+ break;
- case NativeBridgeAction::kInitialize:
- InitializeNativeBridge(env, isa);
- break;
+ case NativeBridgeAction::kInitialize:
+ InitializeNativeBridge(env, isa);
+ break;
+ }
}
// Create the thread pool.
@@ -890,8 +896,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
// Runtime::Start():
// DidForkFromZygote(kInitialize) -> try to initialize any native bridge given.
// No-op wrt native bridge.
- native_bridge_library_filename_ = options->native_bridge_library_filename_;
- LoadNativeBridge(native_bridge_library_filename_);
+ is_native_bridge_loaded_ = LoadNativeBridge(options->native_bridge_library_filename_);
VLOG(startup) << "Runtime::Init exiting";
return true;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f3bea17d6d..bfa7d720cb 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -635,14 +635,16 @@ class Runtime {
bool implicit_so_checks_; // StackOverflow checks are implicit.
bool implicit_suspend_checks_; // Thread suspension checks are implicit.
- // The filename to the native bridge library. If this is not empty the native bridge will be
- // initialized and loaded from the given file (initialized and available). An empty value means
- // that there's no native bridge (initialized but not available).
+ // Whether or not a native bridge has been loaded.
//
// The native bridge allows running native code compiled for a foreign ISA. The way it works is,
// if standard dlopen fails to load the native library associated with a native activity, it
// calls into the native bridge to load it and then gets the trampoline for the entry to the
// native activity.
- std::string native_bridge_library_filename_;
+ //
+ // The option 'native_bridge_library_filename' specifies the name of the native bridge.
+  // When non-empty, the native bridge will be loaded from the given file. An empty value means
+ // that there's no native bridge.
+ bool is_native_bridge_loaded_;
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
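With LoadNativeBridge now returning bool, the runtime records a single is_native_bridge_loaded_ flag at Init() time and consults it at shutdown and when forking from the Zygote, instead of re-deriving the state from a stored filename. A condensed, hypothetical sketch of the resulting flow (names simplified; not the real Runtime class):

    #include <string>

    static bool LoadNativeBridgeSketch(const std::string& filename) {
      // The real function delegates to android::LoadNativeBridge().
      return !filename.empty();  // an empty name means there is no bridge
    }

    struct RuntimeSketch {
      bool is_native_bridge_loaded = false;

      void Init(const std::string& option) {
        is_native_bridge_loaded = LoadNativeBridgeSketch(option);
      }

      void DidForkFromZygote(bool initialize) {
        if (!is_native_bridge_loaded) {
          return;  // nothing to initialize or unload
        }
        if (initialize) {
          // InitializeNativeBridge(env, isa);
        } else {
          // UnloadNativeBridge();
          is_native_bridge_loaded = false;
        }
      }
    };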
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 6d6783678f..d4ec80372d 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -133,7 +133,7 @@ void SignalCatcher::HandleSigQuit() {
runtime->DumpForSigQuit(os);
- if (false) {
+ if ((false)) {
std::string maps;
if (ReadFileToString("/proc/self/maps", &maps)) {
os << "/proc/self/maps:\n" << maps;
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 91a37fddaf..b3158a48a6 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -18,6 +18,9 @@
#include <sys/uio.h>
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include "cutils/trace.h"
+
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
index fbc8c3fb5c..17bfe8f729 100644
--- a/sigchainlib/sigchain_dummy.cc
+++ b/sigchainlib/sigchain_dummy.cc
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <stdio.h>
+#include <stdlib.h>
+
#ifdef HAVE_ANDROID_OS
#include <android/log.h>
#else
@@ -21,8 +24,6 @@
#include <iostream>
#endif
-#include <stdlib.h>
-
#include "sigchain.h"
static void log(const char* format, ...) {
diff --git a/test/115-native-bridge/run b/test/115-native-bridge/run
index e475cd6b89..32a9975c4b 100644
--- a/test/115-native-bridge/run
+++ b/test/115-native-bridge/run
@@ -18,9 +18,9 @@ ARGS=${@}
# Use libnativebridgetest as a native bridge, start NativeBridgeMain (Main is the JniTest main file).
LIBPATH=$(echo ${ARGS} | sed -r 's/.*Djava.library.path=([^ ]*) .*/\1/')
-cp ${LIBPATH}/libnativebridgetest.so .
+ln -s ${LIBPATH}/libnativebridgetest.so .
touch libarttest.so
-cp ${LIBPATH}/libarttest.so libarttest2.so
+ln -s ${LIBPATH}/libarttest.so libarttest2.so
# pwd likely has /, so it's a pain to put that into a sed rule.
LEFT=$(echo ${ARGS} | sed -r 's/-Djava.library.path.*//')
diff --git a/test/414-optimizing-arith-sub/expected.txt b/test/414-optimizing-arith-sub/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/414-optimizing-arith-sub/expected.txt
diff --git a/test/414-optimizing-arith-sub/info.txt b/test/414-optimizing-arith-sub/info.txt
new file mode 100644
index 0000000000..1eaa14887b
--- /dev/null
+++ b/test/414-optimizing-arith-sub/info.txt
@@ -0,0 +1 @@
+Subtraction tests.
diff --git a/test/414-optimizing-arith-sub/src/Main.java b/test/414-optimizing-arith-sub/src/Main.java
new file mode 100644
index 0000000000..30e84368d0
--- /dev/null
+++ b/test/414-optimizing-arith-sub/src/Main.java
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+public class Main {
+
+ public static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(float expected, float result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectEquals(double expected, double result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void expectApproxEquals(float a, float b) {
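+    // Compare with an absolute tolerance, since the expected values need not be exactly representable.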
+ float maxDelta = 0.0001F;
+    boolean approxEquals = (a > b) ? ((a - b) < maxDelta) : ((b - a) < maxDelta);
+    if (!approxEquals) {
+ throw new Error("Expected: " + a + ", found: " + b + ", with delta: " + maxDelta + " " + (a - b));
+ }
+ }
+
+ public static void expectApproxEquals(double a, double b) {
+ double maxDelta = 0.00001D;
+    boolean approxEquals = (a > b) ? ((a - b) < maxDelta) : ((b - a) < maxDelta);
+    if (!approxEquals) {
+ throw new Error("Expected: " + a + ", found: " + b + ", with delta: " + maxDelta + " " + (a - b));
+ }
+ }
+
+ public static void expectNaN(float a) {
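+    // NaN is the only float value that compares unequal to itself.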
+ if (a == a) {
+ throw new Error("Expected NaN: " + a);
+ }
+ }
+
+ public static void expectNaN(double a) {
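+    // NaN is the only double value that compares unequal to itself.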
+ if (a == a) {
+ throw new Error("Expected NaN: " + a);
+ }
+ }
+
+ public static void main(String[] args) {
+ subInt();
+ subLong();
+ subFloat();
+ subDouble();
+ }
+
+ private static void subInt() {
+ expectEquals(2, $opt$Sub(5, 3));
+ expectEquals(0, $opt$Sub(0, 0));
+ expectEquals(-3, $opt$Sub(0, 3));
+ expectEquals(3, $opt$Sub(3, 0));
+ expectEquals(4, $opt$Sub(1, -3));
+ expectEquals(-9, $opt$Sub(-12, -3));
+ expectEquals(134217724, $opt$Sub(134217729, 5)); // (2^27 + 1) - 5
+ }
+
+ private static void subLong() {
+ expectEquals(2L, $opt$Sub(5L, 3L));
+ expectEquals(0L, $opt$Sub(0L, 0L));
+ expectEquals(-3L, $opt$Sub(0L, 3L));
+ expectEquals(3L, $opt$Sub(3L, 0L));
+ expectEquals(4L, $opt$Sub(1L, -3L));
+ expectEquals(-9L, $opt$Sub(-12L, -3L));
+ expectEquals(134217724L, $opt$Sub(134217729L, 5L)); // (2^27 + 1) - 5
+ expectEquals(34359738362L, $opt$Sub(34359738369L, 7L)); // (2^35 + 1) - 7
+ }
+
+ private static void subFloat() {
+ expectApproxEquals(2F, $opt$Sub(5F, 3F));
+ expectApproxEquals(0F, $opt$Sub(0F, 0F));
+ expectApproxEquals(-3F, $opt$Sub(0F, 3F));
+ expectApproxEquals(3F, $opt$Sub(3F, 0F));
+ expectApproxEquals(4F, $opt$Sub(1F, -3F));
+ expectApproxEquals(-9F, $opt$Sub(-12F, -3F));
+ expectApproxEquals(34359738362F, $opt$Sub(34359738369F, 7F)); // (2^35 + 1) - 7
+ expectApproxEquals(-0.1F, $opt$Sub(0.1F, 0.2F));
+ expectApproxEquals(0.2F, $opt$Sub(-0.5F, -0.7F));
+
+ expectNaN($opt$Sub(Float.NEGATIVE_INFINITY, Float.NEGATIVE_INFINITY));
+ expectNaN($opt$Sub(Float.POSITIVE_INFINITY, Float.POSITIVE_INFINITY));
+ expectNaN($opt$Sub(Float.NaN, 11F));
+ expectNaN($opt$Sub(Float.NaN, -11F));
+ expectNaN($opt$Sub(Float.NaN, Float.NEGATIVE_INFINITY));
+ expectNaN($opt$Sub(Float.NaN, Float.POSITIVE_INFINITY));
+
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Sub(-Float.MAX_VALUE, Float.MAX_VALUE));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Sub(2F, Float.POSITIVE_INFINITY));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Sub(Float.MAX_VALUE, -Float.MAX_VALUE));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Sub(2F, Float.NEGATIVE_INFINITY));
+ expectEquals(Float.POSITIVE_INFINITY, $opt$Sub(Float.POSITIVE_INFINITY, Float.NEGATIVE_INFINITY));
+ expectEquals(Float.NEGATIVE_INFINITY, $opt$Sub(Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY));
+ }
+
+ private static void subDouble() {
+ expectApproxEquals(2D, $opt$Sub(5D, 3D));
+ expectApproxEquals(0D, $opt$Sub(0D, 0D));
+ expectApproxEquals(-3D, $opt$Sub(0D, 3D));
+ expectApproxEquals(3D, $opt$Sub(3D, 0D));
+ expectApproxEquals(4D, $opt$Sub(1D, -3D));
+ expectApproxEquals(-9D, $opt$Sub(-12D, -3D));
+ expectApproxEquals(134217724D, $opt$Sub(134217729D, 5D)); // (2^27 + 1) - 5
+ expectApproxEquals(34359738362D, $opt$Sub(34359738369D, 7D)); // (2^35 + 1) - 7
+ expectApproxEquals(-0.1D, $opt$Sub(0.1D, 0.2D));
+ expectApproxEquals(0.2D, $opt$Sub(-0.5D, -0.7D));
+
+ expectNaN($opt$Sub(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY));
+ expectNaN($opt$Sub(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY));
+ expectNaN($opt$Sub(Double.NaN, 11D));
+ expectNaN($opt$Sub(Double.NaN, -11D));
+ expectNaN($opt$Sub(Double.NaN, Double.NEGATIVE_INFINITY));
+ expectNaN($opt$Sub(Double.NaN, Double.POSITIVE_INFINITY));
+
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Sub(-Double.MAX_VALUE, Double.MAX_VALUE));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Sub(2D, Double.POSITIVE_INFINITY));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Sub(Double.MAX_VALUE, -Double.MAX_VALUE));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Sub(2D, Double.NEGATIVE_INFINITY));
+ expectEquals(Double.POSITIVE_INFINITY, $opt$Sub(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY));
+ expectEquals(Double.NEGATIVE_INFINITY, $opt$Sub(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY));
+ }
+
+ static int $opt$Sub(int a, int b) {
+ return a - b;
+ }
+
+ static long $opt$Sub(long a, long b) {
+ return a - b;
+ }
+
+ static float $opt$Sub(float a, float b) {
+ return a - b;
+ }
+
+ static double $opt$Sub(double a, double b) {
+ return a - b;
+ }
+
+}
diff --git a/test/800-smali/build b/test/800-smali/build
deleted file mode 100644
index 1b5a4e3381..0000000000
--- a/test/800-smali/build
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-# Compile Java classes
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --output=java_classes.dex classes
-
-# Compile Smali classes
-${SMALI} -JXmx256m --output smali_classes.dex `find src -name '*.smali'`
-
-# Combine files.
-${DXMERGER} classes.dex java_classes.dex smali_classes.dex
-
-# Zip up output.
-zip $TEST_NAME.jar classes.dex
diff --git a/test/800-smali/info.txt b/test/800-smali/info.txt
index cfcc23095b..3022962ac5 100644
--- a/test/800-smali/info.txt
+++ b/test/800-smali/info.txt
@@ -1,4 +1,4 @@
Smali-based tests.
-Will compile and run all the smali files in src/ and run the test cases mentioned in src/Main.java.
+Will compile all the smali files in smali/ and run the test cases mentioned in src/Main.java.
Obviously needs to run under Dalvik or ART.
diff --git a/test/800-smali/src/b_17790197.smali b/test/800-smali/smali/b_17790197.smali
index 7560fcf834..7560fcf834 100644
--- a/test/800-smali/src/b_17790197.smali
+++ b/test/800-smali/smali/b_17790197.smali
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index fd9503896d..55de1f3f35 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -58,8 +58,7 @@ define build-libarttest
ifeq ($$(art_target_or_host),target)
$(call set-target-local-clang-vars)
$(call set-target-local-cflags-vars,debug)
- LOCAL_SHARED_LIBRARIES += libdl libcutils
- LOCAL_STATIC_LIBRARIES := libgtest
+ LOCAL_SHARED_LIBRARIES += libdl
LOCAL_MULTILIB := both
LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_64)
@@ -68,11 +67,7 @@ define build-libarttest
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
- LOCAL_STATIC_LIBRARIES := libcutils
LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -ldl -lpthread
- ifeq ($(HOST_OS),linux)
- LOCAL_LDLIBS += -lrt
- endif
LOCAL_IS_HOST_MODULE := true
LOCAL_MULTILIB := both
include $(BUILD_HOST_SHARED_LIBRARY)
diff --git a/test/Android.libnativebridgetest.mk b/test/Android.libnativebridgetest.mk
index 5e2493c8bf..1b20e6965b 100644
--- a/test/Android.libnativebridgetest.mk
+++ b/test/Android.libnativebridgetest.mk
@@ -51,7 +51,7 @@ define build-libnativebridgetest
ifeq ($$(art_target_or_host),target)
$(call set-target-local-clang-vars)
$(call set-target-local-cflags-vars,debug)
- LOCAL_SHARED_LIBRARIES += libdl libcutils
+ LOCAL_SHARED_LIBRARIES += libdl
LOCAL_STATIC_LIBRARIES := libgtest
LOCAL_MULTILIB := both
LOCAL_MODULE_PATH_32 := $(ART_TARGET_TEST_OUT)/$(ART_TARGET_ARCH_32)
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 427e0b1e8a..682282532b 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -290,6 +290,45 @@ endif
TEST_ART_BROKEN_DEFAULT_RUN_TESTS :=
+# Known broken tests for the arm64 optimizing compiler backend.
+TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS := \
+ 003-omnibus-opcodes \
+ 006-args \
+ 011-array-copy \
+ 018-stack-overflow \
+ 036-finalizer \
+ 044-proxy \
+ 070-nio-buffer \
+ 072-precise-gc \
+ 082-inline-execute \
+ 083-compiler-regressions \
+ 093-serialization \
+ 096-array-copy-concurrent-gc \
+ 100-reflect2 \
+ 106-exceptions2 \
+ 107-int-math2 \
+ 121-modifiers \
+ 122-npe \
+ 123-compiler-regressions-mt \
+ 405-optimizing-long-allocator \
+ 407-arrays \
+ 410-floats \
+ 411-optimizing-arith \
+ 412-new-array \
+ 413-regalloc-regression \
+ 414-optimizing-arith-sub \
+ 700-LoadArgRegs \
+ 800-smali
+
+ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+ optimizing,$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES),$(TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS),64)
+endif
+
+TEST_ART_BROKEN_OPTIMIZING_ARM64_RUN_TESTS :=
+
+
# Clear variables ahead of appending to them when defining tests.
$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
$(foreach target, $(TARGET_TYPES), \
diff --git a/test/run-test b/test/run-test
index 73ffc3129f..2ef3ab189a 100755
--- a/test/run-test
+++ b/test/run-test
@@ -469,9 +469,6 @@ if echo "$test_dir" | grep 089; then
file_size_limit=5120
elif echo "$test_dir" | grep 083; then
file_size_limit=5120
-elif echo "$test_dir" | grep 115; then
-# Native bridge test copies libarttest.so into its directory, which needs 2MB already.
- file_size_limit=5120
fi
if ! ulimit -S "$file_size_limit"; then
echo "ulimit file size setting failed"