-rw-r--r--  build/Android.common_build.mk | 16
-rw-r--r--  compiler/dex/local_value_numbering.cc | 11
-rw-r--r--  compiler/dex/mir_graph.cc | 8
-rw-r--r--  compiler/dex/mir_optimization.cc | 4
-rw-r--r--  compiler/dex/quick/arm/assemble_arm.cc | 2
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 8
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc | 2
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc | 10
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc | 2
-rw-r--r--  compiler/dex/quick/gen_common.cc | 2
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc | 9
-rw-r--r--  compiler/dex/quick/mips/assemble_mips.cc | 2
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 5
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc | 28
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc | 8
-rw-r--r--  compiler/jni/jni_compiler_test.cc | 18
-rw-r--r--  compiler/optimizing/code_generator.cc | 55
-rw-r--r--  compiler/optimizing/code_generator.h | 42
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 195
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 18
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 90
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 18
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 83
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 21
-rw-r--r--  compiler/optimizing/codegen_test.cc | 113
-rw-r--r--  compiler/optimizing/live_ranges_test.cc | 7
-rw-r--r--  compiler/optimizing/liveness_test.cc | 3
-rw-r--r--  compiler/optimizing/nodes.cc | 24
-rw-r--r--  compiler/optimizing/nodes.h | 126
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 22
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.h | 5
-rw-r--r--  compiler/optimizing/register_allocator.cc | 21
-rw-r--r--  compiler/optimizing/ssa_builder.cc | 2
-rw-r--r--  compiler/utils/scoped_arena_allocator.cc | 10
-rw-r--r--  compiler/utils/scoped_arena_allocator.h | 2
-rw-r--r--  compiler/utils/x86/managed_register_x86.cc | 6
-rw-r--r--  dex2oat/dex2oat.cc | 4
-rw-r--r--  disassembler/disassembler_arm.cc | 11
-rw-r--r--  disassembler/disassembler_arm64.cc | 10
-rw-r--r--  disassembler/disassembler_x86.cc | 2
-rw-r--r--  patchoat/patchoat.cc | 2
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc | 3
-rw-r--r--  runtime/base/histogram-inl.h | 9
-rw-r--r--  runtime/base/histogram_test.cc | 12
-rw-r--r--  runtime/base/macros.h | 42
-rw-r--r--  runtime/check_jni.cc | 2
-rw-r--r--  runtime/class_linker_test.cc | 8
-rw-r--r--  runtime/debugger.cc | 26
-rw-r--r--  runtime/dex_file_verifier.cc | 2
-rw-r--r--  runtime/dex_instruction-inl.h | 20
-rw-r--r--  runtime/dex_instruction.cc | 72
-rw-r--r--  runtime/entrypoints/quick/quick_math_entrypoints.cc | 9
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 36
-rw-r--r--  runtime/gc/allocator/rosalloc.cc | 5
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 2
-rw-r--r--  runtime/handle_scope-inl.h | 5
-rw-r--r--  runtime/handle_scope.h | 104
-rw-r--r--  runtime/handle_scope_test.cc | 6
-rw-r--r--  runtime/hprof/hprof.cc | 4
-rw-r--r--  runtime/interpreter/interpreter_goto_table_impl.cc | 9
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc | 11
-rw-r--r--  runtime/jdwp/jdwp_event.cc | 16
-rw-r--r--  runtime/jni_internal_test.cc | 46
-rw-r--r--  runtime/mirror/object.h | 2
-rw-r--r--  runtime/mirror/object_test.cc | 67
-rw-r--r--  runtime/native/java_lang_Runtime.cc | 2
-rw-r--r--  runtime/native/java_lang_reflect_Field.cc | 1
-rw-r--r--  runtime/parsed_options_test.cc | 2
-rw-r--r--  runtime/reflection_test.cc | 36
-rw-r--r--  runtime/runtime.h | 5
-rw-r--r--  runtime/thread.cc | 26
-rw-r--r--  runtime/thread.h | 2
-rw-r--r--  runtime/transaction_test.cc | 24
-rw-r--r--  test/083-compiler-regressions/expected.txt | 1
-rw-r--r--  test/083-compiler-regressions/src/Main.java | 19
-rwxr-xr-x  test/800-smali/build | 32
-rw-r--r--  test/800-smali/expected.txt | 2
-rw-r--r--  test/800-smali/info.txt | 4
-rw-r--r--  test/800-smali/src/Main.java | 122
-rw-r--r--  test/800-smali/src/b_17790197.smali | 17
-rw-r--r--  test/Android.run-test.mk | 13
-rwxr-xr-x  test/run-test | 10
82 files changed, 1211 insertions, 652 deletions
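This change turns on two new clang warnings (-Wimplicit-fallthrough and -Wfloat-equal) and then annotates the tree so it keeps building cleanly: deliberate switch fall-throughs become FALLTHROUGH_INTENDED, LOG(FATAL) arms gain UNREACHABLE(), and floating-point test assertions move to gtest's ULP-based macros. The runtime/base/macros.h hunk that introduces the two macros is not part of the excerpt below; a minimal sketch of definitions along these lines (an assumption, not copied from the patch) would be:

    // Sketch only; the real definitions live in runtime/base/macros.h (hunk not shown here).
    #if defined(__clang__)
    // [[clang::fallthrough]] is the statement attribute -Wimplicit-fallthrough looks for.
    #define FALLTHROUGH_INTENDED [[clang::fallthrough]]
    // Tells the compiler control never reaches this point, e.g. right after a LOG(FATAL)
    // whose declaration does not say it aborts.
    #define UNREACHABLE() __builtin_unreachable()
    #else
    // Portability fallback for the sketch: expand to harmless no-ops.
    #define FALLTHROUGH_INTENDED do { } while (0)
    #define UNREACHABLE() do { } while (0)
    #endif
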
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 976a66ea9e..14edb716bf 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -196,7 +196,7 @@ ART_TARGET_CLANG_CFLAGS_arm64 += \
-fno-vectorize
art_debug_cflags := \
- -O1 \
+ -O2 \
-DDYNAMIC_ANNOTATIONS_ENABLED=1 \
-UNDEBUG
@@ -231,13 +231,23 @@ ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MIN_DELTA=$(LIBART_IMG_TARGET_MIN_BASE_A
ART_TARGET_CFLAGS += -DART_BASE_ADDRESS_MAX_DELTA=$(LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA)
# Colorize clang compiler warnings.
+art_clang_cflags := -fcolor-diagnostics
+
+# Warn if switch fallthroughs aren't annotated.
+art_clang_cflags += -Wimplicit-fallthrough
+
+# Enable float equality warnings.
+art_clang_cflags += -Wfloat-equal
+
ifeq ($(ART_HOST_CLANG),true)
- ART_HOST_CFLAGS += -fcolor-diagnostics
+ ART_HOST_CFLAGS += $(art_clang_cflags)
endif
ifeq ($(ART_TARGET_CLANG),true)
- ART_TARGET_CFLAGS += -fcolor-diagnostics
+ ART_TARGET_CFLAGS += $(art_clang_cflags)
endif
+art_clang_cflags :=
+
ART_TARGET_LDFLAGS :=
ifeq ($(TARGET_CPU_SMP),true)
ART_TARGET_CFLAGS += -DANDROID_SMP=1
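
With -Wimplicit-fallthrough enabled above, clang flags every unannotated fall-through between case labels, which is what drives the mechanical FALLTHROUGH_INTENDED changes in the rest of this patch; -Wfloat-equal likewise flags any ==/!= on floating-point operands, which is why the gtest assertions further down switch to EXPECT_DOUBLE_EQ/EXPECT_FLOAT_EQ. A small hypothetical translation unit (not ART code) showing the first warning and its suppression, compiled with clang++ -std=c++11 -Wimplicit-fallthrough:

    // fallthrough_demo.cc -- hypothetical example.
    int Classify(int x) {
      int bits = 0;
      switch (x) {
        case 0:
          bits |= 1;               // clang: unannotated fall-through between switch labels
        case 1:
          bits |= 2;
          [[clang::fallthrough]];  // what FALLTHROUGH_INTENDED expands to under clang: silent
        case 2:
          bits |= 4;
          break;
        default:
          break;
      }
      return bits;
    }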
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index e411164f3a..eb9891606c 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -1460,7 +1460,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, reg);
}
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE:
// Make ref args aliasing.
@@ -1583,7 +1583,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
uint16_t reg = GetOperandValue(mir->ssa_rep->uses[0]);
HandleNullCheck(mir, reg);
}
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::NEG_INT:
case Instruction::NOT_INT:
case Instruction::NEG_FLOAT:
@@ -1610,7 +1610,6 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
}
break;
-
case Instruction::DOUBLE_TO_LONG:
case Instruction::LONG_TO_DOUBLE:
case Instruction::NEG_LONG:
@@ -1782,7 +1781,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::APUT_OBJECT:
HandlePutObject(mir);
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::APUT:
case Instruction::APUT_WIDE:
case Instruction::APUT_BYTE:
@@ -1804,7 +1803,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::IPUT_OBJECT:
HandlePutObject(mir);
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::IPUT:
case Instruction::IPUT_WIDE:
case Instruction::IPUT_BOOLEAN:
@@ -1826,7 +1825,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SPUT_OBJECT:
HandlePutObject(mir);
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::SPUT:
case Instruction::SPUT_WIDE:
case Instruction::SPUT_BOOLEAN:
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 7dfdc760bd..f0c9858627 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -391,7 +391,7 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
switch (check_insn->Opcode()) {
case Instruction::MOVE_WIDE:
wide = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::MOVE_OBJECT:
case Instruction::MOVE:
dest = check_insn->VRegA_12x();
@@ -399,7 +399,7 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
case Instruction::MOVE_WIDE_FROM16:
wide = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::MOVE_OBJECT_FROM16:
case Instruction::MOVE_FROM16:
dest = check_insn->VRegA_22x();
@@ -407,7 +407,7 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
case Instruction::MOVE_WIDE_16:
wide = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case Instruction::MOVE_OBJECT_16:
case Instruction::MOVE_16:
dest = check_insn->VRegA_32x();
@@ -417,7 +417,7 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
case Instruction::GOTO_16:
case Instruction::GOTO_32:
check_insn = check_insn->RelativeAt(check_insn->GetTargetOffset());
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
default:
return check_insn->Opcode() == Instruction::MONITOR_EXIT &&
check_insn->VRegA_11x() == monitor_reg;
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 322b737677..84c056daf3 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -929,7 +929,9 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
} else {
// Do the null check.
- // Do not clear MIR_IGNORE_NULL_CHECK flag as it may be set by another optimization
+ // TODO: Rewrite the pass to converge first before doing any modifications so that
+ // we don't lose the MIR_IGNORE_NULL_CHECK here if previously set by some other pass.
+ mir->optimization_flags &= ~MIR_IGNORE_NULL_CHECK;
// Mark s_reg as null-checked
ssa_regs_to_check->ClearBit(src_sreg);
}
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index cf34948969..4e20d76604 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1265,7 +1265,7 @@ void ArmMir2Lir::AssembleLIR() {
if (lir->operands[1] != rs_r15pc.GetReg()) {
break;
}
- // NOTE: intentional fallthrough.
+ FALLTHROUGH_INTENDED;
case kFixupLoad: {
/*
* PC-relative loads are mostly used to load immediates
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 8f1261d301..018dc1c0c6 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -49,12 +49,13 @@ LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
int cond_bit = code & 1;
int alt_bit = cond_bit ^ 1;
- // Note: case fallthroughs intentional
switch (strlen(guide)) {
case 3:
mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
+ FALLTHROUGH_INTENDED;
case 2:
mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
+ FALLTHROUGH_INTENDED;
case 1:
mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
break;
@@ -62,6 +63,7 @@ LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
break;
default:
LOG(FATAL) << "OAT: bad case in OpIT";
+ UNREACHABLE();
}
mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
(1 << (3 - strlen(guide)));
@@ -77,12 +79,13 @@ void ArmMir2Lir::UpdateIT(LIR* it, const char* new_guide) {
int cond_bit = code & 1;
int alt_bit = cond_bit ^ 1;
- // Note: case fallthroughs intentional
switch (strlen(new_guide)) {
case 3:
mask1 = (new_guide[2] == 'T') ? cond_bit : alt_bit;
+ FALLTHROUGH_INTENDED;
case 2:
mask2 = (new_guide[1] == 'T') ? cond_bit : alt_bit;
+ FALLTHROUGH_INTENDED;
case 1:
mask3 = (new_guide[0] == 'T') ? cond_bit : alt_bit;
break;
@@ -90,6 +93,7 @@ void ArmMir2Lir::UpdateIT(LIR* it, const char* new_guide) {
break;
default:
LOG(FATAL) << "OAT: bad case in UpdateIT";
+ UNREACHABLE();
}
mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
(1 << (3 - strlen(new_guide)));
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index bba1a8c65b..e833c9a629 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -494,7 +494,7 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, in
(value <= 1020) && ((value & 0x3) == 0)) {
return NewLIR3(kThumbAddPcRel, r_dest.GetReg(), r_src1.GetReg(), value >> 2);
}
- // Note: intentional fallthrough
+ FALLTHROUGH_INTENDED;
case kOpSub:
if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
if (op == kOpAdd)
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 7c663a9418..e2ff090293 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -705,16 +705,16 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
switch (kind) {
case kFmtRegX:
want_64_bit = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegW:
want_var_size = false;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegR:
want_zero = true;
break;
case kFmtRegXOrSp:
want_64_bit = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegWOrSp:
want_var_size = false;
break;
@@ -722,10 +722,10 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
break;
case kFmtRegD:
want_64_bit = true;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegS:
want_var_size = false;
- // Intentional fall-through.
+ FALLTHROUGH_INTENDED;
case kFmtRegF:
want_float = true;
break;
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 38670ff8be..0883694033 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -833,7 +833,7 @@ LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1
value);
case kOpAdd:
neg = !neg;
- // Note: intentional fallthrough
+ FALLTHROUGH_INTENDED;
case kOpSub:
// Add and sub below read/write sp rather than xzr.
if (abs_value < 0x1000) {
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 3a3821f800..12ca065de4 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1785,7 +1785,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
case Instruction::SUB_INT:
case Instruction::SUB_INT_2ADDR:
lit = -lit;
- // Intended fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::ADD_INT:
case Instruction::ADD_INT_2ADDR:
case Instruction::ADD_INT_LIT8:
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 408c73d1b0..2bef7c53c5 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -552,7 +552,8 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
} else {
break;
}
- // Intentional fallthrough for x86
+ DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
+ FALLTHROUGH_INTENDED;
default:
return -1;
}
@@ -596,7 +597,8 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
- // Intentional fallthrough for X86
+ DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
+ FALLTHROUGH_INTENDED;
default:
return -1;
}
@@ -641,7 +643,8 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
- // Intentional fallthrough for X86
+ DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
+ FALLTHROUGH_INTENDED;
default:
return -1;
}
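
The invoke-lowering state machines above only fall through into the default return on x86/x86-64; the other targets break out earlier. Replacing the "Intentional fallthrough for x86" comments with a DCHECK plus FALLTHROUGH_INTENDED satisfies the new warning and turns the comment into a checked invariant. A simplified, hypothetical sketch of that shape (not the real NextSDCallInsn):

    #include <cassert>

    enum InstructionSet { kArm, kArm64, kX86, kX86_64 };

    int NextCallInsnState(InstructionSet isa, int state) {
      switch (state) {
        case 3:
          if (isa == kArm || isa == kArm64) {
            return state + 1;                     // these targets still have work to do
          }
          assert(isa == kX86 || isa == kX86_64);  // stand-in for DCHECK(...)
          [[clang::fallthrough]];                 // stand-in for FALLTHROUGH_INTENDED
        default:
          return -1;                              // x86/x86-64 fall through to the default return
      }
    }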
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index c7e9190ed9..01d1a1e0db 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -465,6 +465,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
switch (opcode) {
case kMipsBal:
LOG(FATAL) << "long branch and link unsupported";
+ UNREACHABLE();
case kMipsB:
unconditional = true;
break;
@@ -478,6 +479,7 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
case kMipsBnez: opcode = kMipsBeqz; break;
default:
LOG(FATAL) << "Unexpected branch kind " << opcode;
+ UNREACHABLE();
}
LIR* hop_target = NULL;
if (!unconditional) {
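
LOG(FATAL) aborts at runtime, but the compiler cannot see that from its signature, so the kMipsBal case above would still look like an unannotated fall-through into kMipsB, and the default arms in OpIT/UpdateIT would look like paths that keep executing. Adding UNREACHABLE() after the fatal log tells the compiler control never continues. A self-contained sketch of the pattern with a stand-in for LOG(FATAL) (hypothetical, simplified names):

    #include <cstdio>
    #include <cstdlib>

    #define UNREACHABLE() __builtin_unreachable()

    // Stand-in for LOG(FATAL): it aborts, but nothing in the declaration says so.
    static void Fatal(const char* msg) {
      std::fprintf(stderr, "%s\n", msg);
      std::abort();
    }

    int ConvertBranch(int opcode) {
      switch (opcode) {
        case 0:
          Fatal("long branch and link unsupported");
          UNREACHABLE();  // no fall-through warning and no "reaches end of non-void" path
        case 1:
          return 42;
        default:
          Fatal("unexpected branch kind");
          UNREACHABLE();
      }
    }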
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 0ac1299c5a..4399981272 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -482,7 +482,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::RETURN_OBJECT:
DCHECK(rl_src[0].ref);
- // Intentional fallthrough.
+ FALLTHROUGH_INTENDED;
case Instruction::RETURN:
if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
GenSuspendTest(opt_flags);
@@ -1031,8 +1031,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
break;
}
- // Note: intentional fallthrough.
-
+ FALLTHROUGH_INTENDED;
case Instruction::MUL_LONG:
case Instruction::DIV_LONG:
case Instruction::REM_LONG:
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 5177176984..07034cb8d7 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -2930,25 +2930,25 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
break;
case Instruction::ADD_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::ADD_INT:
op = kOpAdd;
break;
case Instruction::SUB_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SUB_INT:
op = kOpSub;
break;
case Instruction::MUL_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::MUL_INT:
op = kOpMul;
break;
case Instruction::DIV_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::DIV_INT:
op = kOpDiv;
is_div_rem = true;
@@ -2956,46 +2956,46 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
/* NOTE: returns in kArg1 */
case Instruction::REM_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::REM_INT:
op = kOpRem;
is_div_rem = true;
break;
case Instruction::AND_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::AND_INT:
op = kOpAnd;
break;
case Instruction::OR_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::OR_INT:
op = kOpOr;
break;
case Instruction::XOR_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::XOR_INT:
op = kOpXor;
break;
case Instruction::SHL_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SHL_INT:
shift_op = true;
op = kOpLsl;
break;
case Instruction::SHR_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SHR_INT:
shift_op = true;
op = kOpAsr;
break;
case Instruction::USHR_INT_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::USHR_INT:
shift_op = true;
op = kOpLsr;
@@ -3245,19 +3245,19 @@ void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
switch (opcode) {
case Instruction::SHL_LONG_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SHL_LONG:
op = kOpLsl;
break;
case Instruction::SHR_LONG_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::SHR_LONG:
op = kOpAsr;
break;
case Instruction::USHR_LONG_2ADDR:
is_two_addr = true;
- // Fallthrough
+ FALLTHROUGH_INTENDED;
case Instruction::USHR_LONG:
op = kOpLsr;
break;
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 6898b5078c..8d5dabc5fd 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -657,7 +657,8 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
CHECK_EQ(is_array, false);
CHECK_EQ(r_dest.IsFloat(), false);
break;
- } // else fall-through to k32 case
+ }
+ FALLTHROUGH_INTENDED; // else fall-through to k32 case
case k32:
case kSingle:
case kReference: // TODO: update for reference decompression on 64-bit targets.
@@ -791,7 +792,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
switch (size) {
case k64:
consider_non_temporal = true;
- // Fall through!
+ FALLTHROUGH_INTENDED;
case kDouble:
if (r_src.IsFloat()) {
opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
@@ -810,7 +811,8 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
CHECK_EQ(r_src.IsFloat(), false);
consider_non_temporal = true;
break;
- } // else fall-through to k32 case
+ }
+ FALLTHROUGH_INTENDED; // else fall-through to k32 case
case k32:
case kSingle:
case kReference:
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index a21004c220..fd7d350a09 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -365,12 +365,12 @@ void JniCompilerTest::CompileAndRunDoubleDoubleMethodImpl() {
EXPECT_EQ(0, gJava_MyClassNatives_fooDD_calls);
jdouble result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_,
99.0, 10.0);
- EXPECT_EQ(99.0 - 10.0, result);
+ EXPECT_DOUBLE_EQ(99.0 - 10.0, result);
EXPECT_EQ(1, gJava_MyClassNatives_fooDD_calls);
jdouble a = 3.14159265358979323846;
jdouble b = 0.69314718055994530942;
result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, a, b);
- EXPECT_EQ(a - b, result);
+ EXPECT_DOUBLE_EQ(a - b, result);
EXPECT_EQ(2, gJava_MyClassNatives_fooDD_calls);
gJava_MyClassNatives_fooDD_calls = 0;
@@ -513,13 +513,13 @@ void JniCompilerTest::CompileAndRunStaticDoubleDoubleMethodImpl() {
EXPECT_EQ(0, gJava_MyClassNatives_fooSDD_calls);
jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 99.0, 10.0);
- EXPECT_EQ(99.0 - 10.0, result);
+ EXPECT_DOUBLE_EQ(99.0 - 10.0, result);
EXPECT_EQ(1, gJava_MyClassNatives_fooSDD_calls);
jdouble a = 3.14159265358979323846;
jdouble b = 0.69314718055994530942;
result = env_->CallStaticDoubleMethod(jklass_, jmethod_, a, b);
- EXPECT_EQ(a - b, result);
- EXPECT_EQ(2, gJava_MyClassNatives_fooSDD_calls);
+ EXPECT_DOUBLE_EQ(a - b, result);
+ EXPECT_DOUBLE_EQ(2, gJava_MyClassNatives_fooSDD_calls);
gJava_MyClassNatives_fooSDD_calls = 0;
}
@@ -539,7 +539,7 @@ void JniCompilerTest::RunStaticLogDoubleMethodImpl() {
SetUpForTest(true, "logD", "(D)D", reinterpret_cast<void*>(&Java_MyClassNatives_logD));
jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 2.0);
- EXPECT_EQ(log(2.0), result);
+ EXPECT_DOUBLE_EQ(log(2.0), result);
}
JNI_TEST(RunStaticLogDoubleMethod)
@@ -553,7 +553,7 @@ void JniCompilerTest::RunStaticLogFloatMethodImpl() {
SetUpForTest(true, "logF", "(F)F", reinterpret_cast<void*>(&Java_MyClassNatives_logF));
jfloat result = env_->CallStaticFloatMethod(jklass_, jmethod_, 2.0);
- EXPECT_EQ(logf(2.0), result);
+ EXPECT_FLOAT_EQ(logf(2.0), result);
}
JNI_TEST(RunStaticLogFloatMethod)
@@ -1047,11 +1047,11 @@ void JniCompilerTest::CompileAndRunFloatFloatMethodImpl() {
jfloat result = env_->CallNonvirtualFloatMethod(jobj_, jklass_, jmethod_,
99.0F, 10.0F);
- EXPECT_EQ(99.0F - 10.0F, result);
+ EXPECT_FLOAT_EQ(99.0F - 10.0F, result);
jfloat a = 3.14159F;
jfloat b = 0.69314F;
result = env_->CallNonvirtualFloatMethod(jobj_, jklass_, jmethod_, a, b);
- EXPECT_EQ(a - b, result);
+ EXPECT_FLOAT_EQ(a - b, result);
}
JNI_TEST(CompileAndRunFloatFloatMethod)
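
Under -Wfloat-equal, EXPECT_EQ on float or double operands warns because the assertion ultimately compares with ==; gtest's EXPECT_FLOAT_EQ/EXPECT_DOUBLE_EQ instead compare within 4 ULPs, which is also the more robust check for computed values. A hypothetical gtest case (not from the patch) showing the replacement:

    #include <gtest/gtest.h>

    TEST(FloatCompare, SubtractionIsAlmostExact) {
      double a = 3.14159265358979323846;
      double b = 0.69314718055994530942;
      double result = a - b;
      // EXPECT_EQ(a - b, result);      // flagged by -Wfloat-equal
      EXPECT_DOUBLE_EQ(a - b, result);  // ULP-based comparison, warning-free
      EXPECT_FLOAT_EQ(99.0f - 10.0f, 89.0f);
    }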
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index fe4c3c3baa..29dbd8b33d 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -102,14 +102,14 @@ void CodeGenerator::GenerateSlowPaths() {
}
}
-size_t CodeGenerator::AllocateFreeRegisterInternal(
- bool* blocked_registers, size_t number_of_registers) const {
- for (size_t regno = 0; regno < number_of_registers; regno++) {
- if (!blocked_registers[regno]) {
- blocked_registers[regno] = true;
- return regno;
+size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
+ for (size_t i = 0; i < length; ++i) {
+ if (!array[i]) {
+ array[i] = true;
+ return i;
}
}
+ LOG(FATAL) << "Could not find a register in baseline register allocator";
return -1;
}
@@ -156,17 +156,34 @@ void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
LocationSummary* locations = instruction->GetLocations();
if (locations == nullptr) return;
- for (size_t i = 0, e = GetNumberOfRegisters(); i < e; ++i) {
- blocked_registers_[i] = false;
+ for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
+ blocked_core_registers_[i] = false;
+ }
+
+ for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+ blocked_fpu_registers_[i] = false;
+ }
+
+ for (size_t i = 0, e = number_of_register_pairs_; i < e; ++i) {
+ blocked_register_pairs_[i] = false;
}
// Mark all fixed input, temp and output registers as used.
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
Location loc = locations->InAt(i);
+ // The DCHECKS below check that a register is not specified twice in
+ // the summary.
if (loc.IsRegister()) {
- // Check that a register is not specified twice in the summary.
- DCHECK(!blocked_registers_[loc.GetEncoding()]);
- blocked_registers_[loc.GetEncoding()] = true;
+ DCHECK(!blocked_core_registers_[loc.reg()]);
+ blocked_core_registers_[loc.reg()] = true;
+ } else if (loc.IsFpuRegister()) {
+ DCHECK(!blocked_fpu_registers_[loc.reg()]);
+ blocked_fpu_registers_[loc.reg()] = true;
+ } else if (loc.IsRegisterPair()) {
+ DCHECK(!blocked_core_registers_[loc.AsRegisterPairLow<int>()]);
+ blocked_core_registers_[loc.AsRegisterPairLow<int>()] = true;
+ DCHECK(!blocked_core_registers_[loc.AsRegisterPairHigh<int>()]);
+ blocked_core_registers_[loc.AsRegisterPairHigh<int>()] = true;
}
}
@@ -174,12 +191,14 @@ void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
Location loc = locations->GetTemp(i);
if (loc.IsRegister()) {
// Check that a register is not specified twice in the summary.
- DCHECK(!blocked_registers_[loc.GetEncoding()]);
- blocked_registers_[loc.GetEncoding()] = true;
+ DCHECK(!blocked_core_registers_[loc.reg()]);
+ blocked_core_registers_[loc.reg()] = true;
+ } else {
+ DCHECK_EQ(loc.GetPolicy(), Location::kRequiresRegister);
}
}
- SetupBlockedRegisters(blocked_registers_);
+ SetupBlockedRegisters();
// Allocate all unallocated input locations.
for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
@@ -188,14 +207,14 @@ void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
if (loc.IsUnallocated()) {
if ((loc.GetPolicy() == Location::kRequiresRegister)
|| (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
- loc = AllocateFreeRegister(input->GetType(), blocked_registers_);
+ loc = AllocateFreeRegister(input->GetType());
} else {
DCHECK_EQ(loc.GetPolicy(), Location::kAny);
HLoadLocal* load = input->AsLoadLocal();
if (load != nullptr) {
loc = GetStackLocation(load);
} else {
- loc = AllocateFreeRegister(input->GetType(), blocked_registers_);
+ loc = AllocateFreeRegister(input->GetType());
}
}
locations->SetInAt(i, loc);
@@ -209,7 +228,7 @@ void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
DCHECK_EQ(loc.GetPolicy(), Location::kRequiresRegister);
// TODO: Adjust handling of temps. We currently consider temps to use
// core registers. They may also use floating point registers at some point.
- loc = AllocateFreeRegister(Primitive::kPrimInt, blocked_registers_);
+ loc = AllocateFreeRegister(Primitive::kPrimInt);
locations->SetTempAt(i, loc);
}
}
@@ -219,7 +238,7 @@ void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
case Location::kAny:
case Location::kRequiresRegister:
case Location::kRequiresFpuRegister:
- result_location = AllocateFreeRegister(instruction->GetType(), blocked_registers_);
+ result_location = AllocateFreeRegister(instruction->GetType());
break;
case Location::kSameAsFirstInput:
result_location = locations->InAt(0);
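
AllocateFreeRegisterInternal becomes the static helper FindFreeEntry, a first-fit scan over a bool array that now also fails loudly when the baseline allocator runs out of registers, and the single blocked_registers_ array is split into core, FPU and register-pair arrays so the backends no longer need pointer-offset helpers such as GetBlockedRegisterPairs. A stand-alone sketch of the scan and of how a blocked array is consumed (assumed usage, simplified from the real allocator):

    #include <cstddef>
    #include <cstdio>

    // Mirrors CodeGenerator::FindFreeEntry: claim and return the first unblocked slot.
    static size_t FindFreeEntry(bool* array, size_t length) {
      for (size_t i = 0; i < length; ++i) {
        if (!array[i]) {
          array[i] = true;
          return i;
        }
      }
      std::fprintf(stderr, "Could not find a register in baseline register allocator\n");
      return static_cast<size_t>(-1);  // the real code LOG(FATAL)s before returning
    }

    int main() {
      bool blocked_core_registers[16] = {};  // one entry per core register
      blocked_core_registers[13] = true;     // e.g. a reserved stack-pointer-style register
      size_t first = FindFreeEntry(blocked_core_registers, 16);   // 0, now blocked
      size_t second = FindFreeEntry(blocked_core_registers, 16);  // 1
      std::printf("allocated r%zu then r%zu\n", first, second);
      return 0;
    }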
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 74ad8e93f3..4eba791723 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -109,10 +109,10 @@ class CodeGenerator : public ArenaObject {
void SetFrameSize(uint32_t size) { frame_size_ = size; }
uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
- virtual size_t GetNumberOfCoreRegisters() const = 0;
- virtual size_t GetNumberOfFloatingPointRegisters() const = 0;
- virtual size_t GetNumberOfRegisters() const = 0;
- virtual void SetupBlockedRegisters(bool* blocked_registers) const = 0;
+ size_t GetNumberOfCoreRegisters() const { return number_of_core_registers_; }
+ size_t GetNumberOfFloatingPointRegisters() const { return number_of_fpu_registers_; }
+ virtual void SetupBlockedRegisters() const = 0;
+
virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
virtual InstructionSet GetInstructionSet() const = 0;
@@ -150,16 +150,26 @@ class CodeGenerator : public ArenaObject {
// have not been written to.
void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;
+ bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
+
protected:
- CodeGenerator(HGraph* graph, size_t number_of_registers)
+ CodeGenerator(HGraph* graph,
+ size_t number_of_core_registers,
+ size_t number_of_fpu_registers,
+ size_t number_of_register_pairs)
: frame_size_(kUninitializedFrameSize),
core_spill_mask_(0),
first_register_slot_in_slow_path_(0),
+ blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers)),
+ blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers)),
+ blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs)),
+ number_of_core_registers_(number_of_core_registers),
+ number_of_fpu_registers_(number_of_fpu_registers),
+ number_of_register_pairs_(number_of_register_pairs),
graph_(graph),
block_labels_(graph->GetArena(), 0),
pc_infos_(graph->GetArena(), 32),
slow_paths_(graph->GetArena(), 8),
- blocked_registers_(graph->GetArena()->AllocArray<bool>(number_of_registers)),
is_leaf_(true),
stack_map_stream_(graph->GetArena()) {}
~CodeGenerator() {}
@@ -168,12 +178,9 @@ class CodeGenerator : public ArenaObject {
void AllocateRegistersLocally(HInstruction* instruction) const;
// Backend specific implementation for allocating a register.
- virtual Location AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const = 0;
+ virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;
- // Raw implementation of allocating a register: loops over blocked_registers to find
- // the first available register.
- size_t AllocateFreeRegisterInternal(bool* blocked_registers, size_t number_of_registers) const;
+ static size_t FindFreeEntry(bool* array, size_t length);
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
@@ -182,6 +189,16 @@ class CodeGenerator : public ArenaObject {
uint32_t core_spill_mask_;
uint32_t first_register_slot_in_slow_path_;
+ // Arrays used when doing register allocation to know which
+ // registers we can allocate. `SetupBlockedRegisters` updates the
+ // arrays.
+ bool* const blocked_core_registers_;
+ bool* const blocked_fpu_registers_;
+ bool* const blocked_register_pairs_;
+ size_t number_of_core_registers_;
+ size_t number_of_fpu_registers_;
+ size_t number_of_register_pairs_;
+
private:
void InitLocations(HInstruction* instruction);
size_t GetStackOffsetOfSavedRegister(size_t index);
@@ -193,9 +210,6 @@ class CodeGenerator : public ArenaObject {
GrowableArray<PcInfo> pc_infos_;
GrowableArray<SlowPathCode*> slow_paths_;
- // Temporary data structure used when doing register allocation.
- bool* const blocked_registers_;
-
bool is_leaf_;
StackMapStream stack_map_stream_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index d555a0d553..9be780216a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -67,7 +67,7 @@ class NullCheckSlowPathARM : public SlowPathCode {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowNullPointer).Int32Value();
- __ ldr(LR, Address(TR, offset));
+ __ LoadFromOffset(kLoadWord, LR, TR, offset);
__ blx(LR);
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
}
@@ -100,7 +100,7 @@ class SuspendCheckSlowPathARM : public SlowPathCode {
__ Bind(GetEntryLabel());
codegen->SaveLiveRegisters(instruction_->GetLocations());
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pTestSuspend).Int32Value();
- __ ldr(LR, Address(TR, offset));
+ __ LoadFromOffset(kLoadWord, LR, TR, offset);
__ blx(LR);
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
codegen->RestoreLiveRegisters(instruction_->GetLocations());
@@ -143,7 +143,7 @@ class BoundsCheckSlowPathARM : public SlowPathCode {
arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowArrayBounds).Int32Value();
- __ ldr(LR, Address(TR, offset));
+ __ LoadFromOffset(kLoadWord, LR, TR, offset);
__ blx(LR);
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
}
@@ -196,15 +196,15 @@ void CodeGeneratorARM::DumpFloatingPointRegister(std::ostream& stream, int reg)
}
void CodeGeneratorARM::SaveCoreRegister(Location stack_location, uint32_t reg_id) {
- __ str(static_cast<Register>(reg_id), Address(SP, stack_location.GetStackIndex()));
+ __ StoreToOffset(kStoreWord, static_cast<Register>(reg_id), SP, stack_location.GetStackIndex());
}
void CodeGeneratorARM::RestoreCoreRegister(Location stack_location, uint32_t reg_id) {
- __ ldr(static_cast<Register>(reg_id), Address(SP, stack_location.GetStackIndex()));
+ __ LoadFromOffset(kLoadWord, static_cast<Register>(reg_id), SP, stack_location.GetStackIndex());
}
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph)
- : CodeGenerator(graph, kNumberOfRegIds),
+ : CodeGenerator(graph, kNumberOfCoreRegisters, kNumberOfDRegisters, kNumberOfRegisterPairs),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this),
@@ -214,24 +214,14 @@ size_t CodeGeneratorARM::FrameEntrySpillSize() const {
return kNumberOfPushedRegistersAtEntry * kArmWordSize;
}
-static bool* GetBlockedRegisterPairs(bool* blocked_registers) {
- return blocked_registers + kNumberOfAllocIds;
-}
-
-static bool* GetBlockedDRegisters(bool* blocked_registers) {
- return blocked_registers + kNumberOfCoreRegisters + kNumberOfSRegisters;
-}
-
-Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const {
+Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
switch (type) {
case Primitive::kPrimLong: {
- bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
- size_t reg = AllocateFreeRegisterInternal(blocked_register_pairs, kNumberOfRegisterPairs);
+ size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
ArmManagedRegister pair =
ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
- blocked_registers[pair.AsRegisterPairLow()] = true;
- blocked_registers[pair.AsRegisterPairHigh()] = true;
+ blocked_core_registers_[pair.AsRegisterPairLow()] = true;
+ blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
// Block all other register pairs that share a register with `pair`.
for (int i = 0; i < kNumberOfRegisterPairs; i++) {
ArmManagedRegister current =
@@ -240,7 +230,7 @@ Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
|| current.AsRegisterPairLow() == pair.AsRegisterPairHigh()
|| current.AsRegisterPairHigh() == pair.AsRegisterPairLow()
|| current.AsRegisterPairHigh() == pair.AsRegisterPairHigh()) {
- blocked_register_pairs[i] = true;
+ blocked_register_pairs_[i] = true;
}
}
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
@@ -252,14 +242,13 @@ Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- int reg = AllocateFreeRegisterInternal(blocked_registers, kNumberOfCoreRegisters);
+ int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
// Block all register pairs that contain `reg`.
- bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
for (int i = 0; i < kNumberOfRegisterPairs; i++) {
ArmManagedRegister current =
ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
- blocked_register_pairs[i] = true;
+ blocked_register_pairs_[i] = true;
}
}
return Location::RegisterLocation(reg);
@@ -267,7 +256,7 @@ Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
- int reg = AllocateFreeRegisterInternal(GetBlockedDRegisters(blocked_registers), kNumberOfDRegisters);
+ int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfDRegisters);
return Location::FpuRegisterLocation(reg);
}
@@ -278,48 +267,41 @@ Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
return Location();
}
-void CodeGeneratorARM::SetupBlockedRegisters(bool* blocked_registers) const {
- bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
- bool* blocked_fpu_registers = GetBlockedDRegisters(blocked_registers);
-
+void CodeGeneratorARM::SetupBlockedRegisters() const {
// Don't allocate the dalvik style register pair passing.
- blocked_register_pairs[R1_R2] = true;
+ blocked_register_pairs_[R1_R2] = true;
// Stack register, LR and PC are always reserved.
- blocked_registers[SP] = true;
- blocked_registers[LR] = true;
- blocked_registers[PC] = true;
+ blocked_core_registers_[SP] = true;
+ blocked_core_registers_[LR] = true;
+ blocked_core_registers_[PC] = true;
// Reserve R4 for suspend check.
- blocked_registers[R4] = true;
- blocked_register_pairs[R4_R5] = true;
+ blocked_core_registers_[R4] = true;
+ blocked_register_pairs_[R4_R5] = true;
// Reserve thread register.
- blocked_registers[TR] = true;
+ blocked_core_registers_[TR] = true;
// Reserve temp register.
- blocked_registers[IP] = true;
+ blocked_core_registers_[IP] = true;
// TODO: We currently don't use Quick's callee saved registers.
// We always save and restore R6 and R7 to make sure we can use three
// register pairs for long operations.
- blocked_registers[R5] = true;
- blocked_registers[R8] = true;
- blocked_registers[R10] = true;
- blocked_registers[R11] = true;
-
- blocked_fpu_registers[D8] = true;
- blocked_fpu_registers[D9] = true;
- blocked_fpu_registers[D10] = true;
- blocked_fpu_registers[D11] = true;
- blocked_fpu_registers[D12] = true;
- blocked_fpu_registers[D13] = true;
- blocked_fpu_registers[D14] = true;
- blocked_fpu_registers[D15] = true;
-}
+ blocked_core_registers_[R5] = true;
+ blocked_core_registers_[R8] = true;
+ blocked_core_registers_[R10] = true;
+ blocked_core_registers_[R11] = true;
-size_t CodeGeneratorARM::GetNumberOfRegisters() const {
- return kNumberOfRegIds;
+ blocked_fpu_registers_[D8] = true;
+ blocked_fpu_registers_[D9] = true;
+ blocked_fpu_registers_[D10] = true;
+ blocked_fpu_registers_[D11] = true;
+ blocked_fpu_registers_[D12] = true;
+ blocked_fpu_registers_[D13] = true;
+ blocked_fpu_registers_[D14] = true;
+ blocked_fpu_registers_[D15] = true;
}
InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen)
@@ -339,7 +321,7 @@ void CodeGeneratorARM::GenerateFrameEntry() {
__ b(slow_path->GetEntryLabel(), CC);
} else {
__ AddConstant(IP, SP, -static_cast<int32_t>(GetStackOverflowReservedBytes(kArm)));
- __ ldr(IP, Address(IP, 0));
+ __ LoadFromOffset(kLoadWord, IP, IP, 0);
RecordPcInfo(nullptr, 0);
}
}
@@ -349,7 +331,7 @@ void CodeGeneratorARM::GenerateFrameEntry() {
// The return PC has already been pushed on the stack.
__ AddConstant(SP, -(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kArmWordSize));
- __ str(R0, Address(SP, 0));
+ __ StoreToOffset(kStoreWord, R0, SP, 0);
}
void CodeGeneratorARM::GenerateFrameExit() {
@@ -434,7 +416,7 @@ void CodeGeneratorARM::Move32(Location destination, Location source) {
} else if (source.IsFpuRegister()) {
__ vmovrs(destination.As<Register>(), FromDToLowS(source.As<DRegister>()));
} else {
- __ ldr(destination.As<Register>(), Address(SP, source.GetStackIndex()));
+ __ LoadFromOffset(kLoadWord, destination.As<Register>(), SP, source.GetStackIndex());
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
@@ -447,13 +429,13 @@ void CodeGeneratorARM::Move32(Location destination, Location source) {
} else {
DCHECK(destination.IsStackSlot());
if (source.IsRegister()) {
- __ str(source.As<Register>(), Address(SP, destination.GetStackIndex()));
+ __ StoreToOffset(kStoreWord, source.As<Register>(), SP, destination.GetStackIndex());
} else if (source.IsFpuRegister()) {
__ vstrs(FromDToLowS(source.As<DRegister>()), Address(SP, destination.GetStackIndex()));
} else {
DCHECK(source.IsStackSlot());
- __ ldr(IP, Address(SP, source.GetStackIndex()));
- __ str(IP, Address(SP, destination.GetStackIndex()));
+ __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
}
}
}
@@ -473,14 +455,14 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
InvokeDexCallingConvention calling_convention;
__ Mov(destination.AsRegisterPairLow<Register>(),
calling_convention.GetRegisterAt(argument_index));
- __ ldr(destination.AsRegisterPairHigh<Register>(),
- Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
+ __ LoadFromOffset(kLoadWord, destination.AsRegisterPairHigh<Register>(),
+ SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
} else {
DCHECK(source.IsDoubleStackSlot());
if (destination.AsRegisterPairLow<Register>() == R1) {
DCHECK_EQ(destination.AsRegisterPairHigh<Register>(), R2);
- __ ldr(R1, Address(SP, source.GetStackIndex()));
- __ ldr(R2, Address(SP, source.GetHighStackIndex(kArmWordSize)));
+ __ LoadFromOffset(kLoadWord, R1, SP, source.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, R2, SP, source.GetHighStackIndex(kArmWordSize));
} else {
__ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
SP, source.GetStackIndex());
@@ -496,24 +478,25 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
InvokeDexCallingConvention calling_convention;
uint32_t argument_index = destination.GetQuickParameterIndex();
if (source.IsRegisterPair()) {
- __ Mov(calling_convention.GetRegisterAt(argument_index), source.AsRegisterPairLow<Register>());
- __ str(source.AsRegisterPairHigh<Register>(),
- Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1)));
+ __ Mov(calling_convention.GetRegisterAt(argument_index),
+ source.AsRegisterPairLow<Register>());
+ __ StoreToOffset(kStoreWord, source.AsRegisterPairHigh<Register>(),
+ SP, calling_convention.GetStackOffsetOf(argument_index + 1));
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else {
DCHECK(source.IsDoubleStackSlot());
- __ ldr(calling_convention.GetRegisterAt(argument_index), Address(SP, source.GetStackIndex()));
- __ ldr(R0, Address(SP, source.GetHighStackIndex(kArmWordSize)));
- __ str(R0, Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1)));
+ __ LoadFromOffset(kLoadWord, calling_convention.GetRegisterAt(argument_index), SP, source.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, R0, SP, source.GetHighStackIndex(kArmWordSize));
+ __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(argument_index + 1));
}
} else {
DCHECK(destination.IsDoubleStackSlot());
if (source.IsRegisterPair()) {
if (source.AsRegisterPairLow<Register>() == R1) {
DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
- __ str(R1, Address(SP, destination.GetStackIndex()));
- __ str(R2, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
+ __ StoreToOffset(kStoreWord, R1, SP, destination.GetStackIndex());
+ __ StoreToOffset(kStoreWord, R2, SP, destination.GetHighStackIndex(kArmWordSize));
} else {
__ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
SP, destination.GetStackIndex());
@@ -521,19 +504,19 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
uint32_t argument_index = source.GetQuickParameterIndex();
- __ str(calling_convention.GetRegisterAt(argument_index),
- Address(SP, destination.GetStackIndex()));
- __ ldr(R0,
- Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
- __ str(R0, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
+ __ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(argument_index),
+ SP, destination.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, R0,
+ SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
+ __ StoreToOffset(kStoreWord, R0, SP, destination.GetHighStackIndex(kArmWordSize));
} else if (source.IsFpuRegister()) {
__ vstrd(source.As<DRegister>(), Address(SP, destination.GetStackIndex()));
} else {
DCHECK(source.IsDoubleStackSlot());
- __ ldr(IP, Address(SP, source.GetStackIndex()));
- __ str(IP, Address(SP, destination.GetStackIndex()));
- __ ldr(IP, Address(SP, source.GetHighStackIndex(kArmWordSize)));
- __ str(IP, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
+ __ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
+ __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, IP, SP, source.GetHighStackIndex(kArmWordSize));
+ __ StoreToOffset(kStoreWord, IP, SP, destination.GetHighStackIndex(kArmWordSize));
}
}
}
@@ -544,16 +527,16 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr
return;
}
- if (instruction->AsIntConstant() != nullptr) {
+ if (instruction->IsIntConstant()) {
int32_t value = instruction->AsIntConstant()->GetValue();
if (location.IsRegister()) {
__ LoadImmediate(location.As<Register>(), value);
} else {
DCHECK(location.IsStackSlot());
__ LoadImmediate(IP, value);
- __ str(IP, Address(SP, location.GetStackIndex()));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
}
- } else if (instruction->AsLongConstant() != nullptr) {
+ } else if (instruction->IsLongConstant()) {
int64_t value = instruction->AsLongConstant()->GetValue();
if (location.IsRegisterPair()) {
__ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
@@ -561,11 +544,11 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr
} else {
DCHECK(location.IsDoubleStackSlot());
__ LoadImmediate(IP, Low32Bits(value));
- __ str(IP, Address(SP, location.GetStackIndex()));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
__ LoadImmediate(IP, High32Bits(value));
- __ str(IP, Address(SP, location.GetHighStackIndex(kArmWordSize)));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
}
- } else if (instruction->AsLoadLocal() != nullptr) {
+ } else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
switch (instruction->GetType()) {
case Primitive::kPrimBoolean:
@@ -902,7 +885,7 @@ void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
}
void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
- __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
+ __ LoadFromOffset(kLoadWord, reg, SP, kCurrentMethodStackOffset);
}
void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
@@ -921,12 +904,12 @@ void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
// temp = method;
LoadCurrentMethod(temp);
// temp = temp->dex_cache_resolved_methods_;
- __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ __ LoadFromOffset(kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
// temp = temp[index_in_cache]
- __ ldr(temp, Address(temp, index_in_cache));
+ __ LoadFromOffset(kLoadWord, temp, temp, index_in_cache);
// LR = temp[offset_of_quick_compiled_code]
- __ ldr(LR, Address(temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ __ LoadFromOffset(kLoadWord, LR, temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
// LR()
__ blx(LR);
@@ -980,16 +963,16 @@ void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
- __ ldr(temp, Address(SP, receiver.GetStackIndex()));
- __ ldr(temp, Address(temp, class_offset));
+ __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
+ __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
} else {
- __ ldr(temp, Address(receiver.As<Register>(), class_offset));
+ __ LoadFromOffset(kLoadWord, temp, receiver.As<Register>(), class_offset);
}
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
- __ ldr(temp, Address(temp, method_offset));
+ __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// LR = temp->GetEntryPoint();
- __ ldr(LR, Address(temp, entry_point));
+ __ LoadFromOffset(kLoadWord, LR, temp, entry_point);
// LR();
__ blx(LR);
DCHECK(!codegen_->IsLeafMethod());
@@ -1139,7 +1122,7 @@ void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
__ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocObjectWithAccessCheck).Int32Value();
- __ ldr(LR, Address(TR, offset));
+ __ LoadFromOffset(kLoadWord, LR, TR, offset);
__ blx(LR);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
@@ -1288,9 +1271,10 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instr
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << field_type;
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
+ UNREACHABLE();
}
}
@@ -1348,9 +1332,10 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instr
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1478,9 +1463,10 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1552,7 +1538,7 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimNot: {
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAputObject).Int32Value();
- __ ldr(LR, Address(TR, offset));
+ __ LoadFromOffset(kLoadWord, LR, TR, offset);
__ blx(LR);
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
DCHECK(!codegen_->IsLeafMethod());
@@ -1575,9 +1561,10 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1706,14 +1693,14 @@ void ParallelMoveResolverARM::EmitMove(size_t index) {
}
} else {
DCHECK(source.IsConstant());
- DCHECK(source.GetConstant()->AsIntConstant() != nullptr);
+ DCHECK(source.GetConstant()->IsIntConstant());
int32_t value = source.GetConstant()->AsIntConstant()->GetValue();
if (destination.IsRegister()) {
__ LoadImmediate(destination.As<Register>(), value);
} else {
DCHECK(destination.IsStackSlot());
__ LoadImmediate(IP, value);
- __ str(IP, Address(SP, destination.GetStackIndex()));
+ __ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
}
}
}
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 9da26e8767..874db0fd54 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -91,7 +91,7 @@ class LocationsBuilderARM : public HGraphVisitor {
LocationsBuilderARM(HGraph* graph, CodeGeneratorARM* codegen)
: HGraphVisitor(graph), codegen_(codegen) {}
-#define DECLARE_VISIT_INSTRUCTION(name) \
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
virtual void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -111,7 +111,7 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
public:
InstructionCodeGeneratorARM(HGraph* graph, CodeGeneratorARM* codegen);
-#define DECLARE_VISIT_INSTRUCTION(name) \
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
virtual void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -163,21 +163,11 @@ class CodeGeneratorARM : public CodeGenerator {
return &assembler_;
}
- virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
- virtual Location AllocateFreeRegister(
- Primitive::Type type, bool* blocked_registers) const OVERRIDE;
- virtual size_t GetNumberOfRegisters() const OVERRIDE;
+ virtual void SetupBlockedRegisters() const OVERRIDE;
+ virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual size_t GetNumberOfCoreRegisters() const OVERRIDE {
- return kNumberOfCoreRegisters;
- }
-
- virtual size_t GetNumberOfFloatingPointRegisters() const OVERRIDE {
- return kNumberOfDRegisters;
- }
-
virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 5f6d45845f..34fa46efd0 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -176,7 +176,7 @@ void CodeGeneratorX86::RestoreCoreRegister(Location stack_location, uint32_t reg
}
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph)
- : CodeGenerator(graph, kNumberOfRegIds),
+ : CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfXmmRegisters, kNumberOfRegisterPairs),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this) {}
@@ -185,23 +185,14 @@ size_t CodeGeneratorX86::FrameEntrySpillSize() const {
return kNumberOfPushedRegistersAtEntry * kX86WordSize;
}
-static bool* GetBlockedRegisterPairs(bool* blocked_registers) {
- return blocked_registers + kNumberOfAllocIds;
-}
-
-static bool* GetBlockedXmmRegisters(bool* blocked_registers) {
- return blocked_registers + kNumberOfCpuRegisters;
-}
-
-Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type, bool* blocked_registers) const {
+Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type) const {
switch (type) {
case Primitive::kPrimLong: {
- bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
- size_t reg = AllocateFreeRegisterInternal(blocked_register_pairs, kNumberOfRegisterPairs);
+ size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
X86ManagedRegister pair =
X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
- blocked_registers[pair.AsRegisterPairLow()] = true;
- blocked_registers[pair.AsRegisterPairHigh()] = true;
+ blocked_core_registers_[pair.AsRegisterPairLow()] = true;
+ blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
// Block all other register pairs that share a register with `pair`.
for (int i = 0; i < kNumberOfRegisterPairs; i++) {
X86ManagedRegister current =
@@ -210,7 +201,7 @@ Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type, bool* bloc
|| current.AsRegisterPairLow() == pair.AsRegisterPairHigh()
|| current.AsRegisterPairHigh() == pair.AsRegisterPairLow()
|| current.AsRegisterPairHigh() == pair.AsRegisterPairHigh()) {
- blocked_register_pairs[i] = true;
+ blocked_register_pairs_[i] = true;
}
}
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
@@ -223,14 +214,13 @@ Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type, bool* bloc
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
Register reg = static_cast<Register>(
- AllocateFreeRegisterInternal(blocked_registers, kNumberOfCpuRegisters));
+ FindFreeEntry(blocked_core_registers_, kNumberOfCpuRegisters));
// Block all register pairs that contain `reg`.
- bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
for (int i = 0; i < kNumberOfRegisterPairs; i++) {
X86ManagedRegister current =
X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
- blocked_register_pairs[i] = true;
+ blocked_register_pairs_[i] = true;
}
}
return Location::RegisterLocation(reg);
@@ -238,8 +228,8 @@ Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type, bool* bloc
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
- return Location::FpuRegisterLocation(AllocateFreeRegisterInternal(
- GetBlockedXmmRegisters(blocked_registers), kNumberOfXmmRegisters));
+ return Location::FpuRegisterLocation(
+ FindFreeEntry(blocked_fpu_registers_, kNumberOfXmmRegisters));
}
case Primitive::kPrimVoid:
@@ -249,27 +239,21 @@ Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type, bool* bloc
return Location();
}
-void CodeGeneratorX86::SetupBlockedRegisters(bool* blocked_registers) const {
- bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
-
+void CodeGeneratorX86::SetupBlockedRegisters() const {
// Don't allocate the dalvik style register pair passing.
- blocked_register_pairs[ECX_EDX] = true;
+ blocked_register_pairs_[ECX_EDX] = true;
// Stack register is always reserved.
- blocked_registers[ESP] = true;
+ blocked_core_registers_[ESP] = true;
// TODO: We currently don't use Quick's callee saved registers.
- blocked_registers[EBP] = true;
- blocked_registers[ESI] = true;
- blocked_registers[EDI] = true;
- blocked_register_pairs[EAX_EDI] = true;
- blocked_register_pairs[EDX_EDI] = true;
- blocked_register_pairs[ECX_EDI] = true;
- blocked_register_pairs[EBX_EDI] = true;
-}
-
-size_t CodeGeneratorX86::GetNumberOfRegisters() const {
- return kNumberOfRegIds;
+ blocked_core_registers_[EBP] = true;
+ blocked_core_registers_[ESI] = true;
+ blocked_core_registers_[EDI] = true;
+ blocked_register_pairs_[EAX_EDI] = true;
+ blocked_register_pairs_[EDX_EDI] = true;
+ blocked_register_pairs_[ECX_EDI] = true;
+ blocked_register_pairs_[EBX_EDI] = true;
}
InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen)
@@ -484,14 +468,14 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
}
void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
- if (instruction->AsIntConstant() != nullptr) {
+ if (instruction->IsIntConstant()) {
Immediate imm(instruction->AsIntConstant()->GetValue());
if (location.IsRegister()) {
__ movl(location.As<Register>(), imm);
} else {
__ movl(Address(ESP, location.GetStackIndex()), imm);
}
- } else if (instruction->AsLongConstant() != nullptr) {
+ } else if (instruction->IsLongConstant()) {
int64_t value = instruction->AsLongConstant()->GetValue();
if (location.IsRegister()) {
__ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
@@ -500,7 +484,7 @@ void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstr
__ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
__ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
}
- } else if (instruction->AsLoadLocal() != nullptr) {
+ } else if (instruction->IsLoadLocal()) {
int slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
switch (instruction->GetType()) {
case Primitive::kPrimBoolean:
@@ -593,10 +577,13 @@ void LocationsBuilderX86::VisitIf(HIf* if_instr) {
void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
HInstruction* cond = if_instr->InputAt(0);
- if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
- // Moves do not affect the eflags register, so if the condition is evaluated
- // just before the if, we don't need to evaluate it again.
- if (!cond->IsCondition() || !cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr)) {
+ bool materialized = !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
+ // Moves do not affect the eflags register, so if the condition is evaluated
+ // just before the if, we don't need to evaluate it again.
+ bool eflags_set = cond->IsCondition()
+ && cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr);
+ if (materialized) {
+ if (!eflags_set) {
// Materialized condition, compare against 0.
Location lhs = if_instr->GetLocations()->InAt(0);
if (lhs.IsRegister()) {
@@ -604,8 +591,11 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
} else {
__ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
}
+ __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ } else {
+ __ j(X86Condition(cond->AsCondition()->GetCondition()),
+ codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
}
- __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
} else {
Location lhs = cond->GetLocations()->InAt(0);
Location rhs = cond->GetLocations()->InAt(1);
@@ -1276,9 +1266,10 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instr
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << field_type;
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
+ UNREACHABLE();
}
}
@@ -1348,9 +1339,10 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instr
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1478,9 +1470,10 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1631,9 +1624,10 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index c52016478a..a1a72a2bd7 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -92,7 +92,7 @@ class LocationsBuilderX86 : public HGraphVisitor {
LocationsBuilderX86(HGraph* graph, CodeGeneratorX86* codegen)
: HGraphVisitor(graph), codegen_(codegen) {}
-#define DECLARE_VISIT_INSTRUCTION(name) \
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
virtual void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -112,7 +112,7 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
public:
InstructionCodeGeneratorX86(HGraph* graph, CodeGeneratorX86* codegen);
-#define DECLARE_VISIT_INSTRUCTION(name) \
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
virtual void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -165,21 +165,11 @@ class CodeGeneratorX86 : public CodeGenerator {
return &assembler_;
}
- virtual size_t GetNumberOfRegisters() const OVERRIDE;
- virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
- virtual Location AllocateFreeRegister(
- Primitive::Type type, bool* blocked_registers) const OVERRIDE;
+ virtual void SetupBlockedRegisters() const OVERRIDE;
+ virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual size_t GetNumberOfCoreRegisters() const OVERRIDE {
- return kNumberOfCpuRegisters;
- }
-
- virtual size_t GetNumberOfFloatingPointRegisters() const OVERRIDE {
- return kNumberOfXmmRegisters;
- }
-
virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 393eb1a2d4..059140d9bf 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -185,7 +185,7 @@ void CodeGeneratorX86_64::RestoreCoreRegister(Location stack_location, uint32_t
}
CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph)
- : CodeGenerator(graph, kNumberOfRegIds),
+ : CodeGenerator(graph, kNumberOfCpuRegisters, kNumberOfFloatRegisters, 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetArena(), this) {}
@@ -200,8 +200,7 @@ InstructionCodeGeneratorX86_64::InstructionCodeGeneratorX86_64(HGraph* graph,
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-Location CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const {
+Location CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type) const {
switch (type) {
case Primitive::kPrimLong:
case Primitive::kPrimByte:
@@ -210,14 +209,13 @@ Location CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type,
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- size_t reg = AllocateFreeRegisterInternal(blocked_registers, kNumberOfCpuRegisters);
+ size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfCpuRegisters);
return Location::RegisterLocation(reg);
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
- size_t reg = AllocateFreeRegisterInternal(
- blocked_registers + kNumberOfCpuRegisters, kNumberOfFloatRegisters);
+ size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFloatRegisters);
return Location::FpuRegisterLocation(reg);
}
@@ -228,26 +226,25 @@ Location CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type,
return Location();
}
-void CodeGeneratorX86_64::SetupBlockedRegisters(bool* blocked_registers) const {
+void CodeGeneratorX86_64::SetupBlockedRegisters() const {
// Stack register is always reserved.
- blocked_registers[RSP] = true;
+ blocked_core_registers_[RSP] = true;
// Block the register used as TMP.
- blocked_registers[TMP] = true;
+ blocked_core_registers_[TMP] = true;
// TODO: We currently don't use Quick's callee saved registers.
- blocked_registers[RBX] = true;
- blocked_registers[RBP] = true;
- blocked_registers[R12] = true;
- blocked_registers[R13] = true;
- blocked_registers[R14] = true;
- blocked_registers[R15] = true;
+ blocked_core_registers_[RBX] = true;
+ blocked_core_registers_[RBP] = true;
+ blocked_core_registers_[R12] = true;
+ blocked_core_registers_[R13] = true;
+ blocked_core_registers_[R14] = true;
+ blocked_core_registers_[R15] = true;
- bool* blocked_xmm_registers = blocked_registers + kNumberOfCpuRegisters;
- blocked_xmm_registers[XMM12] = true;
- blocked_xmm_registers[XMM13] = true;
- blocked_xmm_registers[XMM14] = true;
- blocked_xmm_registers[XMM15] = true;
+ blocked_fpu_registers_[XMM12] = true;
+ blocked_fpu_registers_[XMM13] = true;
+ blocked_fpu_registers_[XMM14] = true;
+ blocked_fpu_registers_[XMM15] = true;
}
void CodeGeneratorX86_64::GenerateFrameEntry() {
@@ -378,14 +375,14 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) {
void CodeGeneratorX86_64::Move(HInstruction* instruction,
Location location,
HInstruction* move_for) {
- if (instruction->AsIntConstant() != nullptr) {
+ if (instruction->IsIntConstant()) {
Immediate imm(instruction->AsIntConstant()->GetValue());
if (location.IsRegister()) {
__ movl(location.As<CpuRegister>(), imm);
} else {
__ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
}
- } else if (instruction->AsLongConstant() != nullptr) {
+ } else if (instruction->IsLongConstant()) {
int64_t value = instruction->AsLongConstant()->GetValue();
if (location.IsRegister()) {
__ movq(location.As<CpuRegister>(), Immediate(value));
@@ -393,7 +390,7 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction,
__ movq(CpuRegister(TMP), Immediate(value));
__ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
}
- } else if (instruction->AsLoadLocal() != nullptr) {
+ } else if (instruction->IsLoadLocal()) {
switch (instruction->GetType()) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -482,10 +479,13 @@ void LocationsBuilderX86_64::VisitIf(HIf* if_instr) {
void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
HInstruction* cond = if_instr->InputAt(0);
- if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
- // Moves do not affect the eflags register, so if the condition is evaluated
- // just before the if, we don't need to evaluate it again.
- if (!cond->IsCondition() || !cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr)) {
+ bool materialized = !cond->IsCondition() || cond->AsCondition()->NeedsMaterialization();
+ // Moves do not affect the eflags register, so if the condition is evaluated
+ // just before the if, we don't need to evaluate it again.
+ bool eflags_set = cond->IsCondition()
+ && cond->AsCondition()->IsBeforeWhenDisregardMoves(if_instr);
+ if (materialized) {
+ if (!eflags_set) {
// Materialized condition, compare against 0.
Location lhs = if_instr->GetLocations()->InAt(0);
if (lhs.IsRegister()) {
@@ -493,8 +493,11 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
} else {
__ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()), Immediate(0));
}
+ __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
+ } else {
+ __ j(X86_64Condition(cond->AsCondition()->GetCondition()),
+ codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
}
- __ j(kNotEqual, codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
} else {
Location lhs = cond->GetLocations()->InAt(0);
Location rhs = cond->GetLocations()->InAt(1);
@@ -574,13 +577,13 @@ void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* comp) {
// Clear register: setcc only sets the low byte.
__ xorq(reg, reg);
if (locations->InAt(1).IsRegister()) {
- __ cmpq(locations->InAt(0).As<CpuRegister>(),
+ __ cmpl(locations->InAt(0).As<CpuRegister>(),
locations->InAt(1).As<CpuRegister>());
} else if (locations->InAt(1).IsConstant()) {
- __ cmpq(locations->InAt(0).As<CpuRegister>(),
+ __ cmpl(locations->InAt(0).As<CpuRegister>(),
Immediate(locations->InAt(1).GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ cmpq(locations->InAt(0).As<CpuRegister>(),
+ __ cmpl(locations->InAt(0).As<CpuRegister>(),
Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
}
__ setcc(X86_64Condition(comp->GetCondition()), reg);
@@ -879,10 +882,10 @@ void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke)
size_t class_offset = mirror::Object::ClassOffset().SizeValue();
// temp = object->GetClass();
if (receiver.IsStackSlot()) {
- __ movq(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
- __ movq(temp, Address(temp, class_offset));
+ __ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+ __ movl(temp, Address(temp, class_offset));
} else {
- __ movq(temp, Address(receiver.As<CpuRegister>(), class_offset));
+ __ movl(temp, Address(receiver.As<CpuRegister>(), class_offset));
}
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -1158,9 +1161,10 @@ void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* in
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << field_type;
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
+ UNREACHABLE();
}
}
@@ -1212,9 +1216,10 @@ void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* in
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1339,9 +1344,10 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
@@ -1465,9 +1471,10 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
-
+ UNREACHABLE();
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
+ UNREACHABLE();
}
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index bdaf15f9fd..288f3f61f9 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -94,7 +94,7 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
LocationsBuilderX86_64(HGraph* graph, CodeGeneratorX86_64* codegen)
: HGraphVisitor(graph), codegen_(codegen) {}
-#define DECLARE_VISIT_INSTRUCTION(name) \
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
virtual void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -114,7 +114,7 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
public:
InstructionCodeGeneratorX86_64(HGraph* graph, CodeGeneratorX86_64* codegen);
-#define DECLARE_VISIT_INSTRUCTION(name) \
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
virtual void Visit##name(H##name* instr);
FOR_EACH_CONCRETE_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -173,21 +173,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- virtual size_t GetNumberOfRegisters() const OVERRIDE {
- return kNumberOfRegIds;
- }
-
- virtual size_t GetNumberOfCoreRegisters() const OVERRIDE {
- return kNumberOfCpuRegisters;
- }
-
- virtual size_t GetNumberOfFloatingPointRegisters() const OVERRIDE {
- return kNumberOfFloatRegisters;
- }
-
- virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
- virtual Location AllocateFreeRegister(
- Primitive::Type type, bool* blocked_registers) const OVERRIDE;
+ virtual void SetupBlockedRegisters() const OVERRIDE;
+ virtual Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
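Side note on the allocation scheme now shared by the ARM, x86 and x86-64 back ends above: the blocked_core_registers_, blocked_fpu_registers_ and blocked_register_pairs_ arrays live in the CodeGenerator base class, SetupBlockedRegisters() marks the reserved entries, and AllocateFreeRegister() takes the first free one. The FindFreeEntry() helper these calls rely on lives in the shared CodeGenerator code; below is only a minimal sketch of the first-fit scan the call sites imply, assuming the tree's usual LOG/UNREACHABLE macros.

    #include "base/logging.h"  // LOG(FATAL), path as used elsewhere in this tree.
    #include "base/macros.h"   // UNREACHABLE(), see the macros.h hunk later in this change.

    // Sketch only: return the first unblocked index and reserve it, as implied by
    // calls like FindFreeEntry(blocked_core_registers_, kNumberOfCpuRegisters).
    static size_t FindFreeEntry(bool* array, size_t length) {
      for (size_t i = 0; i < length; ++i) {
        if (!array[i]) {
          array[i] = true;  // Later calls skip this register.
          return i;
        }
      }
      LOG(FATAL) << "Could not find a free register of the requested kind";
      UNREACHABLE();
    }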
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 7161eed9f9..3037f1c2e8 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <functional>
+
#include "builder.h"
#include "code_generator_arm.h"
#include "code_generator_x86.h"
@@ -24,6 +26,9 @@
#include "instruction_set.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
+#include "prepare_for_register_allocation.h"
+#include "register_allocator.h"
+#include "ssa_liveness_analysis.h"
#include "gtest/gtest.h"
@@ -62,19 +67,11 @@ static void Run(const InternalCodeAllocator& allocator,
}
int32_t result = f();
if (has_result) {
- CHECK_EQ(result, expected);
+ ASSERT_EQ(result, expected);
}
}
-static void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraphBuilder builder(&arena);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- HGraph* graph = builder.BuildGraph(*item);
- // Remove suspend checks, they cannot be executed in this context.
- RemoveSuspendChecks(graph);
- ASSERT_NE(graph, nullptr);
+static void RunCodeBaseline(HGraph* graph, bool has_result, int32_t expected) {
InternalCodeAllocator allocator;
x86::CodeGeneratorX86 codegenX86(graph);
@@ -98,6 +95,51 @@ static void TestCode(const uint16_t* data, bool has_result = false, int32_t expe
}
}
+static void RunCodeOptimized(CodeGenerator* codegen,
+ HGraph* graph,
+ std::function<void(HGraph*)> hook_before_codegen,
+ bool has_result,
+ int32_t expected) {
+ SsaLivenessAnalysis liveness(*graph, codegen);
+ liveness.Analyze();
+
+ RegisterAllocator register_allocator(graph->GetArena(), codegen, liveness);
+ register_allocator.AllocateRegisters();
+ hook_before_codegen(graph);
+
+ InternalCodeAllocator allocator;
+ codegen->CompileOptimized(&allocator);
+ Run(allocator, *codegen, has_result, expected);
+}
+
+static void RunCodeOptimized(HGraph* graph,
+ std::function<void(HGraph*)> hook_before_codegen,
+ bool has_result,
+ int32_t expected) {
+ if (kRuntimeISA == kX86) {
+ x86::CodeGeneratorX86 codegenX86(graph);
+ RunCodeOptimized(&codegenX86, graph, hook_before_codegen, has_result, expected);
+ } else if (kRuntimeISA == kArm || kRuntimeISA == kThumb2) {
+ arm::CodeGeneratorARM codegenARM(graph);
+ RunCodeOptimized(&codegenARM, graph, hook_before_codegen, has_result, expected);
+ } else if (kRuntimeISA == kX86_64) {
+ x86_64::CodeGeneratorX86_64 codegenX86_64(graph);
+ RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
+ }
+}
+
+static void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ HGraphBuilder builder(&arena);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* graph = builder.BuildGraph(*item);
+ // Remove suspend checks, they cannot be executed in this context.
+ RemoveSuspendChecks(graph);
+ ASSERT_NE(graph, nullptr);
+ RunCodeBaseline(graph, has_result, expected);
+}
+
TEST(CodegenTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
TestCode(data);
@@ -256,4 +298,55 @@ TEST(CodegenTest, ReturnAdd4) {
TestCode(data, true, 7);
}
+TEST(CodegenTest, NonMaterializedCondition) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+
+ HGraph* graph = new (&allocator) HGraph(&allocator);
+ HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry);
+ graph->SetEntryBlock(entry);
+ entry->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(first_block);
+ entry->AddSuccessor(first_block);
+ HIntConstant* constant0 = new (&allocator) HIntConstant(0);
+ entry->AddInstruction(constant0);
+ HIntConstant* constant1 = new (&allocator) HIntConstant(1);
+ entry->AddInstruction(constant1);
+ HEqual* equal = new (&allocator) HEqual(constant0, constant0);
+ first_block->AddInstruction(equal);
+ first_block->AddInstruction(new (&allocator) HIf(equal));
+
+ HBasicBlock* then = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* else_ = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
+
+ graph->AddBlock(then);
+ graph->AddBlock(else_);
+ graph->AddBlock(exit);
+ first_block->AddSuccessor(then);
+ first_block->AddSuccessor(else_);
+ then->AddSuccessor(exit);
+ else_->AddSuccessor(exit);
+
+ exit->AddInstruction(new (&allocator) HExit());
+ then->AddInstruction(new (&allocator) HReturn(constant0));
+ else_->AddInstruction(new (&allocator) HReturn(constant1));
+
+ ASSERT_TRUE(equal->NeedsMaterialization());
+ graph->BuildDominatorTree();
+ PrepareForRegisterAllocation(graph).Run();
+ ASSERT_FALSE(equal->NeedsMaterialization());
+
+ auto hook_before_codegen = [](HGraph* graph) {
+ HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+
+ RunCodeOptimized(graph, hook_before_codegen, true, 0);
+}
+
} // namespace art
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 8be47461b2..d5f4f902c8 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -21,6 +21,7 @@
#include "dex_instruction.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
+#include "prepare_for_register_allocation.h"
#include "ssa_liveness_analysis.h"
#include "utils/arena_allocator.h"
@@ -38,6 +39,8 @@ static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
graph->BuildDominatorTree();
graph->TransformToSSA();
graph->FindNaturalLoops();
+ // `Inline` conditions into ifs.
+ PrepareForRegisterAllocation(graph).Run();
return graph;
}
@@ -72,7 +75,7 @@ TEST(LiveRangesTest, CFG1) {
// Last use is the return instruction.
ASSERT_EQ(9u, range->GetEnd());
HBasicBlock* block = graph->GetBlocks().Get(1);
- ASSERT_TRUE(block->GetLastInstruction()->AsReturn() != nullptr);
+ ASSERT_TRUE(block->GetLastInstruction()->IsReturn());
ASSERT_EQ(8u, block->GetLastInstruction()->GetLifetimePosition());
ASSERT_TRUE(range->GetNext() == nullptr);
}
@@ -118,7 +121,7 @@ TEST(LiveRangesTest, CFG2) {
// Last use is the return instruction.
ASSERT_EQ(23u, range->GetEnd());
HBasicBlock* block = graph->GetBlocks().Get(3);
- ASSERT_TRUE(block->GetLastInstruction()->AsReturn() != nullptr);
+ ASSERT_TRUE(block->GetLastInstruction()->IsReturn());
ASSERT_EQ(22u, block->GetLastInstruction()->GetLifetimePosition());
ASSERT_TRUE(range->GetNext() == nullptr);
}
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 2d861696bb..246e7ef309 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -21,6 +21,7 @@
#include "dex_instruction.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
+#include "prepare_for_register_allocation.h"
#include "ssa_liveness_analysis.h"
#include "utils/arena_allocator.h"
@@ -50,6 +51,8 @@ static void TestCode(const uint16_t* data, const char* expected) {
graph->BuildDominatorTree();
graph->TransformToSSA();
graph->FindNaturalLoops();
+ // `Inline` conditions into ifs.
+ PrepareForRegisterAllocation(graph).Run();
x86::CodeGeneratorX86 codegen(graph);
SsaLivenessAnalysis liveness(*graph, &codegen);
liveness.Analyze();
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 4cac3198ea..a058dea6b4 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -317,8 +317,8 @@ static void UpdateInputsUsers(HInstruction* instruction) {
}
void HBasicBlock::InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor) {
- DCHECK(cursor->AsPhi() == nullptr);
- DCHECK(instruction->AsPhi() == nullptr);
+ DCHECK(!cursor->IsPhi());
+ DCHECK(!instruction->IsPhi());
DCHECK_EQ(instruction->GetId(), -1);
DCHECK_NE(cursor->GetId(), -1);
DCHECK_EQ(cursor->GetBlock(), this);
@@ -537,7 +537,7 @@ void HPhi::AddInput(HInstruction* input) {
input->AddUseAt(this, inputs_.Size() - 1);
}
-#define DEFINE_ACCEPT(name) \
+#define DEFINE_ACCEPT(name, super) \
void H##name::Accept(HGraphVisitor* visitor) { \
visitor->Visit##name(this); \
}
@@ -575,24 +575,6 @@ HConstant* HBinaryOperation::TryStaticEvaluation(ArenaAllocator* allocator) cons
return nullptr;
}
-bool HCondition::NeedsMaterialization() const {
- if (!HasOnlyOneUse()) {
- return true;
- }
- HUseListNode<HInstruction>* uses = GetUses();
- HInstruction* user = uses->GetUser();
- if (!user->IsIf()) {
- return true;
- }
-
- // TODO: if there is no intervening instructions with side-effect between this condition
- // and the If instruction, we should move the condition just before the If.
- if (GetNext() != user) {
- return true;
- }
- return false;
-}
-
bool HCondition::IsBeforeWhenDisregardMoves(HIf* if_) const {
HInstruction* previous = if_->GetPrevious();
while (previous != nullptr && previous->IsParallelMove()) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 2010e7ef39..677a4f8591 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -465,50 +465,51 @@ class HBasicBlock : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(HBasicBlock);
};
-#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
- M(Add) \
- M(Condition) \
- M(Equal) \
- M(NotEqual) \
- M(LessThan) \
- M(LessThanOrEqual) \
- M(GreaterThan) \
- M(GreaterThanOrEqual) \
- M(Exit) \
- M(Goto) \
- M(If) \
- M(IntConstant) \
- M(InvokeStatic) \
- M(InvokeVirtual) \
- M(LoadLocal) \
- M(Local) \
- M(LongConstant) \
- M(NewInstance) \
- M(Not) \
- M(ParameterValue) \
- M(ParallelMove) \
- M(Phi) \
- M(Return) \
- M(ReturnVoid) \
- M(StoreLocal) \
- M(Sub) \
- M(Compare) \
- M(InstanceFieldGet) \
- M(InstanceFieldSet) \
- M(ArrayGet) \
- M(ArraySet) \
- M(ArrayLength) \
- M(BoundsCheck) \
- M(NullCheck) \
- M(Temporary) \
- M(SuspendCheck) \
-
-#define FOR_EACH_INSTRUCTION(M) \
- FOR_EACH_CONCRETE_INSTRUCTION(M) \
- M(Constant) \
- M(BinaryOperation)
-
-#define FORWARD_DECLARATION(type) class H##type;
+#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
+ M(Add, BinaryOperation) \
+ M(Condition, BinaryOperation) \
+ M(Equal, Condition) \
+ M(NotEqual, Condition) \
+ M(LessThan, Condition) \
+ M(LessThanOrEqual, Condition) \
+ M(GreaterThan, Condition) \
+ M(GreaterThanOrEqual, Condition) \
+ M(Exit, Instruction) \
+ M(Goto, Instruction) \
+ M(If, Instruction) \
+ M(IntConstant, Constant) \
+ M(InvokeStatic, Invoke) \
+ M(InvokeVirtual, Invoke) \
+ M(LoadLocal, Instruction) \
+ M(Local, Instruction) \
+ M(LongConstant, Constant) \
+ M(NewInstance, Instruction) \
+ M(Not, Instruction) \
+ M(ParameterValue, Instruction) \
+ M(ParallelMove, Instruction) \
+ M(Phi, Instruction) \
+ M(Return, Instruction) \
+ M(ReturnVoid, Instruction) \
+ M(StoreLocal, Instruction) \
+ M(Sub, BinaryOperation) \
+ M(Compare, BinaryOperation) \
+ M(InstanceFieldGet, Instruction) \
+ M(InstanceFieldSet, Instruction) \
+ M(ArrayGet, Instruction) \
+ M(ArraySet, Instruction) \
+ M(ArrayLength, Instruction) \
+ M(BoundsCheck, Instruction) \
+ M(NullCheck, Instruction) \
+ M(Temporary, Instruction) \
+ M(SuspendCheck, Instruction) \
+
+#define FOR_EACH_INSTRUCTION(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION(M) \
+ M(Constant, Instruction) \
+ M(BinaryOperation, Instruction) \
+ M(Invoke, Instruction)
+
+#define FORWARD_DECLARATION(type, super) class H##type;
FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
#undef FORWARD_DECLARATION
@@ -623,7 +624,7 @@ class HInstruction : public ArenaObject {
virtual ~HInstruction() {}
-#define DECLARE_KIND(type) k##type,
+#define DECLARE_KIND(type, super) k##type,
enum InstructionKind {
FOR_EACH_INSTRUCTION(DECLARE_KIND)
};
@@ -709,7 +710,7 @@ class HInstruction : public ArenaObject {
return uses_ != nullptr && uses_->GetTail() == nullptr;
}
-#define INSTRUCTION_TYPE_CHECK(type) \
+#define INSTRUCTION_TYPE_CHECK(type, super) \
bool Is##type() const { return (As##type() != nullptr); } \
virtual const H##type* As##type() const { return nullptr; } \
virtual H##type* As##type() { return nullptr; }
@@ -1118,13 +1119,13 @@ class HBinaryOperation : public HExpression<2> {
class HCondition : public HBinaryOperation {
public:
HCondition(HInstruction* first, HInstruction* second)
- : HBinaryOperation(Primitive::kPrimBoolean, first, second) {}
+ : HBinaryOperation(Primitive::kPrimBoolean, first, second),
+ needs_materialization_(true) {}
virtual bool IsCommutative() { return true; }
- // For register allocation purposes, returns whether this instruction needs to be
- // materialized (that is, not just be in the processor flags).
- bool NeedsMaterialization() const;
+ bool NeedsMaterialization() const { return needs_materialization_; }
+ void ClearNeedsMaterialization() { needs_materialization_ = false; }
// For code generation purposes, returns whether this instruction is just before
// `if_`, and disregard moves in between.
@@ -1135,6 +1136,10 @@ class HCondition : public HBinaryOperation {
virtual IfCondition GetCondition() const = 0;
private:
+  // For register allocation purposes, whether this instruction needs to be
+ // materialized (that is, not just be in the processor flags).
+ bool needs_materialization_;
+
DISALLOW_COPY_AND_ASSIGN(HCondition);
};
@@ -1437,6 +1442,8 @@ class HInvoke : public HInstruction {
uint32_t GetDexPc() const { return dex_pc_; }
+ DECLARE_INSTRUCTION(Invoke);
+
protected:
GrowableArray<HInstruction*> inputs_;
const Primitive::Type return_type_;
@@ -1954,7 +1961,7 @@ class HGraphVisitor : public ValueObject {
HGraph* GetGraph() const { return graph_; }
// Visit functions for instruction classes.
-#define DECLARE_VISIT_INSTRUCTION(name) \
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
virtual void Visit##name(H##name* instr) { VisitInstruction(instr); }
FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
@@ -1967,6 +1974,23 @@ class HGraphVisitor : public ValueObject {
DISALLOW_COPY_AND_ASSIGN(HGraphVisitor);
};
+class HGraphDelegateVisitor : public HGraphVisitor {
+ public:
+ explicit HGraphDelegateVisitor(HGraph* graph) : HGraphVisitor(graph) {}
+ virtual ~HGraphDelegateVisitor() {}
+
+  // Visit functions that delegate to the super class.
+#define DECLARE_VISIT_INSTRUCTION(name, super) \
+ virtual void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); }
+
+ FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HGraphDelegateVisitor);
+};
+
class HInsertionOrderIterator : public ValueObject {
public:
explicit HInsertionOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {}
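The `super` argument threaded through these macros is what makes the new HGraphDelegateVisitor useful: each generated Visit##name forwards to Visit##super, so a pass only has to override the most general class it cares about. A minimal sketch of such a pass follows (MyConditionPass is an invented name; PrepareForRegisterAllocation further below is the real user of this pattern).

    class MyConditionPass : public HGraphDelegateVisitor {
     public:
      explicit MyConditionPass(HGraph* graph) : HGraphDelegateVisitor(graph) {}

      // The generated VisitEqual, VisitNotEqual, VisitLessThan, ... all delegate
      // here, because their `super` in FOR_EACH_CONCRETE_INSTRUCTION is Condition.
      virtual void VisitCondition(HCondition* condition) OVERRIDE {
        // Inspect or rewrite the condition, e.g. condition->NeedsMaterialization().
      }

     private:
      DISALLOW_COPY_AND_ASSIGN(MyConditionPass);
    };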
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index bfbbab57bb..a81dc1bcd1 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -37,4 +37,26 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
check->ReplaceWith(check->InputAt(0));
}
+void PrepareForRegisterAllocation::VisitCondition(HCondition* condition) {
+ bool needs_materialization = false;
+ if (!condition->HasOnlyOneUse()) {
+ needs_materialization = true;
+ } else {
+ HUseListNode<HInstruction>* uses = condition->GetUses();
+ HInstruction* user = uses->GetUser();
+ if (!user->IsIf()) {
+ needs_materialization = true;
+ } else {
+      // TODO: if there are no intervening instructions with side effects between this condition
+ // and the If instruction, we should move the condition just before the If.
+ if (condition->GetNext() != user) {
+ needs_materialization = true;
+ }
+ }
+ }
+ if (!needs_materialization) {
+ condition->ClearNeedsMaterialization();
+ }
+}
+
} // namespace art
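Put another way, a condition may stay un-materialized (live only in the flags) exactly when its single user is the HIf that immediately follows it. A condensed restatement of the check above as a standalone predicate (ConditionCanStayInFlags is a hypothetical helper, not part of the patch):

    // Sketch: true iff VisitCondition above would call ClearNeedsMaterialization().
    static bool ConditionCanStayInFlags(HCondition* condition) {
      if (!condition->HasOnlyOneUse()) {
        return false;
      }
      HInstruction* user = condition->GetUses()->GetUser();
      return user->IsIf() && condition->GetNext() == user;
    }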
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index 37f28712b8..e86a39b39f 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -26,15 +26,16 @@ namespace art {
* For example it changes uses of null checks and bounds checks to the original
* objects, to avoid creating a live range for these checks.
*/
-class PrepareForRegisterAllocation : public HGraphVisitor {
+class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
public:
- explicit PrepareForRegisterAllocation(HGraph* graph) : HGraphVisitor(graph) {}
+ explicit PrepareForRegisterAllocation(HGraph* graph) : HGraphDelegateVisitor(graph) {}
void Run();
private:
virtual void VisitNullCheck(HNullCheck* check) OVERRIDE;
virtual void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
+ virtual void VisitCondition(HCondition* condition) OVERRIDE;
DISALLOW_COPY_AND_ASSIGN(PrepareForRegisterAllocation);
};
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index a9d159eaa3..5055a76107 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -37,18 +37,18 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
handled_(allocator, 0),
active_(allocator, 0),
inactive_(allocator, 0),
- physical_register_intervals_(allocator, codegen->GetNumberOfRegisters()),
+ physical_register_intervals_(allocator, codegen->GetNumberOfCoreRegisters()),
temp_intervals_(allocator, 4),
spill_slots_(allocator, kDefaultNumberOfSpillSlots),
safepoints_(allocator, 0),
processing_core_registers_(false),
number_of_registers_(-1),
registers_array_(nullptr),
- blocked_registers_(allocator->AllocArray<bool>(codegen->GetNumberOfRegisters())),
+ blocked_registers_(codegen->GetBlockedCoreRegisters()),
reserved_out_slots_(0),
maximum_number_of_live_registers_(0) {
- codegen->SetupBlockedRegisters(blocked_registers_);
- physical_register_intervals_.SetSize(codegen->GetNumberOfRegisters());
+ codegen->SetupBlockedRegisters();
+ physical_register_intervals_.SetSize(codegen->GetNumberOfCoreRegisters());
// Always reserve for the current method and the graph's max out registers.
// TODO: compute it instead.
reserved_out_slots_ = 1 + codegen->GetGraph()->GetMaximumNumberOfOutVRegs();
@@ -742,12 +742,12 @@ void RegisterAllocator::AddInputMoveFor(HInstruction* user,
DCHECK(IsValidDestination(destination));
if (source.Equals(destination)) return;
- DCHECK(user->AsPhi() == nullptr);
+ DCHECK(!user->IsPhi());
HInstruction* previous = user->GetPrevious();
HParallelMove* move = nullptr;
if (previous == nullptr
- || previous->AsParallelMove() == nullptr
+ || !previous->IsParallelMove()
|| !IsInputMove(previous)) {
move = new (allocator_) HParallelMove(allocator_);
move->SetLifetimePosition(kInputMoveLifetimePosition);
@@ -821,6 +821,11 @@ void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
DCHECK_EQ(block->GetSuccessors().Size(), 1u);
HInstruction* last = block->GetLastInstruction();
+ // We insert moves at exit for phi predecessors and connecting blocks.
+ // A block ending with an if cannot branch to a block with phis because
+  // we do not allow critical edges. Nor can it connect
+ // a split interval between two blocks: the move has to happen in the successor.
+ DCHECK(!last->IsIf());
HInstruction* previous = last->GetPrevious();
HParallelMove* move;
// This is a parallel move for connecting blocks. We need to differentiate
@@ -861,7 +866,7 @@ void RegisterAllocator::InsertMoveAfter(HInstruction* instruction,
DCHECK(IsValidDestination(destination));
if (source.Equals(destination)) return;
- if (instruction->AsPhi() != nullptr) {
+ if (instruction->IsPhi()) {
InsertParallelMoveAtEntryOf(instruction->GetBlock(), instruction, source, destination);
return;
}
@@ -1031,7 +1036,7 @@ void RegisterAllocator::Resolve() {
LiveInterval* current = instruction->GetLiveInterval();
LocationSummary* locations = instruction->GetLocations();
Location location = locations->Out();
- if (instruction->AsParameterValue() != nullptr) {
+ if (instruction->IsParameterValue()) {
// Now that we know the frame size, adjust the parameter's location.
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 471307ec31..be2c03957d 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -51,7 +51,7 @@ void SsaBuilder::BuildSsa() {
!it.Done();
it.Advance()) {
HInstruction* current = it.Current();
- if (current->AsLocal() != nullptr) {
+ if (current->IsLocal()) {
current->GetBlock()->RemoveInstruction(current);
}
}
diff --git a/compiler/utils/scoped_arena_allocator.cc b/compiler/utils/scoped_arena_allocator.cc
index aeb2f768dd..26161501b3 100644
--- a/compiler/utils/scoped_arena_allocator.cc
+++ b/compiler/utils/scoped_arena_allocator.cc
@@ -115,10 +115,18 @@ ScopedArenaAllocator::ScopedArenaAllocator(ArenaStack* arena_stack)
}
ScopedArenaAllocator::~ScopedArenaAllocator() {
- Reset();
+ DoReset();
}
void ScopedArenaAllocator::Reset() {
+ DoReset();
+ // If this allocator was Create()d, we need to move the arena_stack_->top_ptr_ past *this.
+ if (mark_ptr_ == reinterpret_cast<uint8_t*>(this)) {
+ arena_stack_->top_ptr_ = mark_ptr_ + RoundUp(sizeof(ScopedArenaAllocator), 8);
+ }
+}
+
+void ScopedArenaAllocator::DoReset() {
DebugStackReference::CheckTop();
DebugStackRefCounter::CheckNoRefs();
arena_stack_->UpdatePeakStatsAndRestore(*this);
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
index 62ea3302a3..523f158969 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -132,6 +132,8 @@ class ScopedArenaAllocator
uint8_t* mark_ptr_;
uint8_t* mark_end_;
+ void DoReset();
+
template <typename T>
friend class ScopedArenaAllocatorAdapter;
diff --git a/compiler/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc
index 021fe88c22..69e6fce5c4 100644
--- a/compiler/utils/x86/managed_register_x86.cc
+++ b/compiler/utils/x86/managed_register_x86.cc
@@ -51,7 +51,11 @@ static const RegisterPairDescriptor kRegisterPairs[] = {
};
std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) {
- os << X86ManagedRegister::FromRegisterPair(reg);
+ if (reg == kNoRegisterPair) {
+ os << "kNoRegisterPair";
+ } else {
+ os << X86ManagedRegister::FromRegisterPair(reg);
+ }
return os;
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index e1f513d046..7be4349067 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -90,7 +90,7 @@ static void UsageError(const char* fmt, ...) {
va_end(ap);
}
-static void Usage(const char* fmt, ...) {
+[[noreturn]] static void Usage(const char* fmt, ...) {
va_list ap;
va_start(ap, fmt);
UsageErrorV(fmt, ap);
@@ -662,7 +662,7 @@ class WatchDog {
Message('W', message);
}
- static void Fatal(const std::string& message) {
+ [[noreturn]] static void Fatal(const std::string& message) {
Message('F', message);
exit(1);
}
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 6f8e08b3ba..ac883fe4e3 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -407,11 +407,11 @@ uint64_t AdvSIMDExpand(uint32_t op, uint32_t cmode, uint32_t imm8) {
}
uint64_t imm = imm8;
switch (cmode321) {
- case 3: imm <<= 8; // Fall through.
- case 2: imm <<= 8; // Fall through.
- case 1: imm <<= 8; // Fall through.
+ case 3: imm <<= 8; FALLTHROUGH_INTENDED;
+ case 2: imm <<= 8; FALLTHROUGH_INTENDED;
+ case 1: imm <<= 8; FALLTHROUGH_INTENDED;
case 0: return static_cast<int64_t>((imm << 32) | imm);
- case 5: imm <<= 8; // Fall through.
+ case 5: imm <<= 8; FALLTHROUGH_INTENDED;
case 4: return static_cast<int64_t>((imm << 48) | (imm << 32) | (imm << 16) | imm);
case 6:
imm = ((imm + 1u) << ((cmode & 1) != 0 ? 16 : 8)) - 1u; // Add 8 or 16 ones.
@@ -1196,7 +1196,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
}
break;
}
- // Else deliberate fall-through to B.
+ FALLTHROUGH_INTENDED; // Else deliberate fall-through to B.
case 1: case 3: {
// B
// |111|11|1|0000|000000|11|1 |1|1 |10000000000|
@@ -1597,6 +1597,7 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
}
}
}
+ break;
default:
break;
}
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 5d0c2189cc..fc1065aff9 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -27,15 +27,11 @@
namespace art {
namespace arm64 {
-static uint32_t ReadU32(const uint8_t* ptr) {
- return *((const uint32_t*)ptr);
-}
-
size_t DisassemblerArm64::Dump(std::ostream& os, const uint8_t* begin) {
- uint32_t instruction = ReadU32(begin);
- decoder.Decode(reinterpret_cast<vixl::Instruction*>(&instruction));
+ const vixl::Instruction* instr = reinterpret_cast<const vixl::Instruction*>(begin);
+ decoder.Decode(instr);
os << FormatInstructionPointer(begin)
- << StringPrintf(": %08x\t%s\n", instruction, disasm.GetOutput());
+ << StringPrintf(": %08x\t%s\n", instr->InstructionBits(), disasm.GetOutput());
return vixl::kInstructionSize;
}
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 195c45f79d..63a74c7240 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -412,7 +412,7 @@ DISASSEMBLER_ENTRY(cmp,
break;
case 0x2E:
opcode << "u";
- // FALLTHROUGH
+ FALLTHROUGH_INTENDED;
case 0x2F:
if (prefix[2] == 0x66) {
opcode << "comisd";
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index fbb36f3e99..4ed428c200 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -644,7 +644,7 @@ static void UsageError(const char* fmt, ...) {
va_end(ap);
}
-static void Usage(const char *fmt, ...) {
+[[noreturn]] static void Usage(const char *fmt, ...) {
va_list ap;
va_start(ap, fmt);
UsageErrorV(fmt, ap);
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 17310b6d95..9d74ef5ef8 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -113,7 +113,7 @@ static uint32_t GetInstructionSize(const uint8_t* pc) {
// Group 3
case 0x66:
operand_size_prefix = true;
- // fallthrough
+ FALLTHROUGH_INTENDED;
// Group 1
case 0xf0:
@@ -184,6 +184,7 @@ static uint32_t GetInstructionSize(const uint8_t* pc) {
case 0x80: // group 1, byte immediate.
case 0x83:
+ case 0xc6:
modrm = *pc++;
has_modrm = true;
immediate_size = 1;
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index 4c18ce405c..b329a31b1f 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -195,6 +195,11 @@ inline void Histogram<Value>::CreateHistogram(CumulativeData* out_data) const {
DCHECK_LE(std::abs(out_data->perc_.back() - 1.0), 0.001);
}
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wfloat-equal"
+#endif
+
template <class Value>
inline double Histogram<Value>::Percentile(double per, const CumulativeData& data) const {
DCHECK_GT(data.perc_.size(), 0ull);
@@ -235,6 +240,10 @@ inline double Histogram<Value>::Percentile(double per, const CumulativeData& dat
return value;
}
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
} // namespace art
#endif // ART_RUNTIME_BASE_HISTOGRAM_INL_H_
diff --git a/runtime/base/histogram_test.cc b/runtime/base/histogram_test.cc
index 454f2abd8e..7aa5f9037f 100644
--- a/runtime/base/histogram_test.cc
+++ b/runtime/base/histogram_test.cc
@@ -41,14 +41,14 @@ TEST(Histtest, MeanTest) {
hist->AddValue(static_cast<uint64_t>(50));
}
mean = hist->Mean();
- EXPECT_EQ(mean, 50);
+ EXPECT_DOUBLE_EQ(mean, 50.0);
hist->Reset();
hist->AddValue(9);
hist->AddValue(17);
hist->AddValue(28);
hist->AddValue(28);
mean = hist->Mean();
- EXPECT_EQ(20.5, mean);
+ EXPECT_DOUBLE_EQ(20.5, mean);
}
TEST(Histtest, VarianceTest) {
@@ -60,7 +60,7 @@ TEST(Histtest, VarianceTest) {
hist->AddValue(28);
hist->AddValue(28);
variance = hist->Variance();
- EXPECT_EQ(64.25, variance);
+ EXPECT_DOUBLE_EQ(64.25, variance);
}
TEST(Histtest, Percentile) {
@@ -236,7 +236,7 @@ TEST(Histtest, CappingPercentiles) {
}
hist->CreateHistogram(&data);
per_995 = hist->Percentile(0.995, data);
- EXPECT_EQ(per_995, 0);
+ EXPECT_DOUBLE_EQ(per_995, 0.0);
hist->Reset();
for (size_t idx = 0; idx < 200; idx++) {
for (uint64_t val = 1ull; val <= 4ull; val++) {
@@ -246,8 +246,8 @@ TEST(Histtest, CappingPercentiles) {
hist->CreateHistogram(&data);
per_005 = hist->Percentile(0.005, data);
per_995 = hist->Percentile(0.995, data);
- EXPECT_EQ(1, per_005);
- EXPECT_EQ(4, per_995);
+ EXPECT_DOUBLE_EQ(1.0, per_005);
+ EXPECT_DOUBLE_EQ(4.0, per_995);
}
TEST(Histtest, SpikyValues) {
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index b66d528d1b..f5a38bbf35 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -181,6 +181,48 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
template<typename T> void UNUSED(const T&) {}
#define UNREACHABLE __builtin_unreachable
+// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
+// between switch labels:
+// switch (x) {
+// case 40:
+// case 41:
+// if (truth_is_out_there) {
+// ++x;
+// FALLTHROUGH_INTENDED; // Use instead of/along with annotations in
+// // comments.
+// } else {
+// return x;
+// }
+// case 42:
+// ...
+//
+// As shown in the example above, the FALLTHROUGH_INTENDED macro should be
+// followed by a semicolon. It is designed to mimic control-flow statements
+// like 'break;', so it can be placed in most places where 'break;' can, but
+// only if there are no statements on the execution path between it and the
+// next switch label.
+//
+// When compiled with clang in C++11 mode, the FALLTHROUGH_INTENDED macro is
+// expanded to the [[clang::fallthrough]] attribute, which is analysed when
+// performing the switch-label fall-through diagnostic ('-Wimplicit-fallthrough').
+// See clang documentation on language extensions for details:
+// http://clang.llvm.org/docs/LanguageExtensions.html#clang__fallthrough
+//
+// When used with unsupported compilers, the FALLTHROUGH_INTENDED macro has no
+// effect on diagnostics.
+//
+// In either case this macro has no effect on the runtime behavior or
+// performance of the code.
+#if defined(__clang__) && __cplusplus >= 201103L && defined(__has_warning)
+#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
+#define FALLTHROUGH_INTENDED [[clang::fallthrough]] // NOLINT
+#endif
+#endif
+
+#ifndef FALLTHROUGH_INTENDED
+#define FALLTHROUGH_INTENDED do { } while (0)
+#endif
+
// Annotalysis thread-safety analysis support.
#if defined(__SUPPORT_TS_ANNOTATION__) || defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
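For completeness, a tiny self-contained use of the new macro in the style of the switch rewrites elsewhere in this change (ExpandByCmode is an invented example, not code from the tree); with a non-clang compiler the annotation compiles away to the empty do/while and the fall-through behaves exactly as before.

    #include "base/macros.h"  // FALLTHROUGH_INTENDED, as defined above.

    static uint64_t ExpandByCmode(uint32_t cmode, uint64_t imm) {
      switch (cmode) {
        case 3: imm <<= 8; FALLTHROUGH_INTENDED;  // Each annotated case also runs the
        case 2: imm <<= 8; FALLTHROUGH_INTENDED;  // cases below it, as in the ARM
        case 1: imm <<= 8; FALLTHROUGH_INTENDED;  // disassembler change above.
        default: break;
      }
      return imm;
    }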
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index bfe44a28bc..fec1824a9e 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -1128,7 +1128,7 @@ class ScopedCheck {
*errorKind = "continuation";
return utf8;
}
- // Fall through to take care of the final byte.
+ FALLTHROUGH_INTENDED; // Fall-through to take care of the final byte.
case 0x0c:
case 0x0d:
// Bit pattern 110x, so there is one additional byte.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e990181aa8..88e6265df5 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -907,12 +907,12 @@ TEST_F(ClassLinkerTest, StaticFields) {
mirror::ArtField* s6 = mirror::Class::FindStaticField(soa.Self(), statics, "s6", "F");
EXPECT_EQ(s6->GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
- EXPECT_EQ(0.5, s6->GetFloat(statics.Get()));
+ EXPECT_DOUBLE_EQ(0.5, s6->GetFloat(statics.Get()));
s6->SetFloat<false>(statics.Get(), 0.75);
mirror::ArtField* s7 = mirror::Class::FindStaticField(soa.Self(), statics, "s7", "D");
EXPECT_EQ(s7->GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
- EXPECT_EQ(16777217, s7->GetDouble(statics.Get()));
+ EXPECT_DOUBLE_EQ(16777217.0, s7->GetDouble(statics.Get()));
s7->SetDouble<false>(statics.Get(), 16777219);
mirror::ArtField* s8 = mirror::Class::FindStaticField(soa.Self(), statics, "s8",
@@ -930,8 +930,8 @@ TEST_F(ClassLinkerTest, StaticFields) {
EXPECT_EQ(-535, s3->GetShort(statics.Get()));
EXPECT_EQ(2000000001, s4->GetInt(statics.Get()));
EXPECT_EQ(INT64_C(0x34567890abcdef12), s5->GetLong(statics.Get()));
- EXPECT_EQ(0.75, s6->GetFloat(statics.Get()));
- EXPECT_EQ(16777219, s7->GetDouble(statics.Get()));
+ EXPECT_FLOAT_EQ(0.75, s6->GetFloat(statics.Get()));
+ EXPECT_DOUBLE_EQ(16777219.0, s7->GetDouble(statics.Get()));
EXPECT_TRUE(s8->GetObject(statics.Get())->AsString()->Equals("robot"));
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 96b44bfdf7..971ff89036 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -3504,6 +3504,7 @@ static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
switch (tag) {
default:
LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
+ UNREACHABLE();
// Primitives.
case JDWP::JT_BYTE: return 'B';
@@ -4364,11 +4365,7 @@ void Dbg::DdmSendHeapSegments(bool native) {
Thread* self = Thread::Current();
- // To allow the Walk/InspectAll() below to exclusively-lock the
- // mutator lock, temporarily release the shared access to the
- // mutator lock here by transitioning to the suspended state.
Locks::mutator_lock_->AssertSharedHeld(self);
- self->TransitionFromRunnableToSuspended(kSuspended);
// Send a series of heap segment chunks.
HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
@@ -4382,32 +4379,39 @@ void Dbg::DdmSendHeapSegments(bool native) {
gc::Heap* heap = Runtime::Current()->GetHeap();
for (const auto& space : heap->GetContinuousSpaces()) {
if (space->IsDlMallocSpace()) {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
// dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
// allocation then the first sizeof(size_t) may belong to it.
context.SetChunkOverhead(sizeof(size_t));
space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
} else if (space->IsRosAllocSpace()) {
context.SetChunkOverhead(0);
- space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
+ // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
+ self->TransitionFromRunnableToSuspended(kSuspended);
+ ThreadList* tl = Runtime::Current()->GetThreadList();
+ tl->SuspendAll();
+ {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ }
+ tl->ResumeAll();
+ self->TransitionFromSuspendedToRunnable();
} else if (space->IsBumpPointerSpace()) {
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
context.SetChunkOverhead(0);
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
} else {
UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
}
context.ResetStartOfNextChunk();
}
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
// Walk the large objects, these are not in the AllocSpace.
context.SetChunkOverhead(0);
heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
}
- // Shared-lock the mutator lock back.
- self->TransitionFromSuspendedToRunnable();
- Locks::mutator_lock_->AssertSharedHeld(self);
-
// Finally, send a heap end chunk.
Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
}
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 059725357a..a3f3de8514 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -142,7 +142,7 @@ bool DexFileVerifier::CheckShortyDescriptorMatch(char shorty_char, const char* d
ErrorStringPrintf("Invalid use of void");
return false;
}
- // Intentional fallthrough.
+ FALLTHROUGH_INTENDED;
case 'B':
case 'C':
case 'D':
diff --git a/runtime/dex_instruction-inl.h b/runtime/dex_instruction-inl.h
index ad9491fda6..dd65f2c0c6 100644
--- a/runtime/dex_instruction-inl.h
+++ b/runtime/dex_instruction-inl.h
@@ -460,11 +460,21 @@ inline void Instruction::GetVarArgs(uint32_t arg[5], uint16_t inst_data) const {
* copies of those.) Note that cases 5..2 fall through.
*/
switch (count) {
- case 5: arg[4] = InstA(inst_data);
- case 4: arg[3] = (regList >> 12) & 0x0f;
- case 3: arg[2] = (regList >> 8) & 0x0f;
- case 2: arg[1] = (regList >> 4) & 0x0f;
- case 1: arg[0] = regList & 0x0f; break;
+ case 5:
+ arg[4] = InstA(inst_data);
+ FALLTHROUGH_INTENDED;
+ case 4:
+ arg[3] = (regList >> 12) & 0x0f;
+ FALLTHROUGH_INTENDED;
+ case 3:
+ arg[2] = (regList >> 8) & 0x0f;
+ FALLTHROUGH_INTENDED;
+ case 2:
+ arg[1] = (regList >> 4) & 0x0f;
+ FALLTHROUGH_INTENDED;
+ case 1:
+ arg[0] = regList & 0x0f;
+ break;
default: // case 0
break; // Valid, but no need to do anything.
}
diff --git a/runtime/dex_instruction.cc b/runtime/dex_instruction.cc
index 0a71d621e1..7e775f4ed3 100644
--- a/runtime/dex_instruction.cc
+++ b/runtime/dex_instruction.cc
@@ -111,7 +111,7 @@ size_t Instruction::SizeInCodeUnitsComplexOpcode() const {
if ((*insns & 0xFF) == 0) {
return 1; // NOP.
} else {
- LOG(FATAL) << "Unreachable: " << DumpString(NULL);
+ LOG(FATAL) << "Unreachable: " << DumpString(nullptr);
return 0;
}
}
@@ -161,21 +161,23 @@ std::string Instruction::DumpString(const DexFile* file) const {
case k21c: {
switch (Opcode()) {
case CONST_STRING:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t string_idx = VRegB_21c();
os << StringPrintf("const-string v%d, %s // string@%d", VRegA_21c(),
PrintableString(file->StringDataByIdx(string_idx)).c_str(), string_idx);
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case CHECK_CAST:
case CONST_CLASS:
case NEW_INSTANCE:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t type_idx = VRegB_21c();
os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyType(type_idx, *file)
<< " // type@" << type_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case SGET:
case SGET_WIDE:
case SGET_OBJECT:
@@ -183,12 +185,13 @@ std::string Instruction::DumpString(const DexFile* file) const {
case SGET_BYTE:
case SGET_CHAR:
case SGET_SHORT:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegB_21c();
os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyField(field_idx, *file, true)
<< " // field@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case SPUT:
case SPUT_WIDE:
case SPUT_OBJECT:
@@ -196,12 +199,13 @@ std::string Instruction::DumpString(const DexFile* file) const {
case SPUT_BYTE:
case SPUT_CHAR:
case SPUT_SHORT:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegB_21c();
os << opcode << " v" << static_cast<int>(VRegA_21c()) << ", " << PrettyField(field_idx, *file, true)
<< " // field@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
default:
os << StringPrintf("%s v%d, thing@%d", opcode, VRegA_21c(), VRegB_21c());
break;
@@ -221,20 +225,22 @@ std::string Instruction::DumpString(const DexFile* file) const {
case IGET_BYTE:
case IGET_CHAR:
case IGET_SHORT:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyField(field_idx, *file, true) << " // field@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case IGET_QUICK:
case IGET_OBJECT_QUICK:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< "// offset@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case IPUT:
case IPUT_WIDE:
case IPUT_OBJECT:
@@ -242,34 +248,38 @@ std::string Instruction::DumpString(const DexFile* file) const {
case IPUT_BYTE:
case IPUT_CHAR:
case IPUT_SHORT:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyField(field_idx, *file, true) << " // field@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case IPUT_QUICK:
case IPUT_OBJECT_QUICK:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t field_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< "// offset@" << field_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case INSTANCE_OF:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t type_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyType(type_idx, *file) << " // type@" << type_idx;
break;
}
+ FALLTHROUGH_INTENDED;
case NEW_ARRAY:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t type_idx = VRegC_22c();
os << opcode << " v" << static_cast<int>(VRegA_22c()) << ", v" << static_cast<int>(VRegB_22c()) << ", "
<< PrettyType(type_idx, *file) << " // type@" << type_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
default:
os << StringPrintf("%s v%d, v%d, thing@%d", opcode, VRegA_22c(), VRegB_22c(), VRegC_22c());
break;
@@ -283,7 +293,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
case k31c:
if (Opcode() == CONST_STRING_JUMBO) {
uint32_t string_idx = VRegB_31c();
- if (file != NULL) {
+ if (file != nullptr) {
os << StringPrintf("%s v%d, %s // string@%d", opcode, VRegA_31c(),
PrintableString(file->StringDataByIdx(string_idx)).c_str(),
string_idx);
@@ -317,7 +327,7 @@ std::string Instruction::DumpString(const DexFile* file) const {
case INVOKE_DIRECT:
case INVOKE_STATIC:
case INVOKE_INTERFACE:
- if (file != NULL) {
+ if (file != nullptr) {
os << opcode << " {";
uint32_t method_idx = VRegB_35c();
for (size_t i = 0; i < VRegA_35c(); ++i) {
@@ -328,9 +338,10 @@ std::string Instruction::DumpString(const DexFile* file) const {
}
os << "}, " << PrettyMethod(method_idx, *file) << " // method@" << method_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case INVOKE_VIRTUAL_QUICK:
- if (file != NULL) {
+ if (file != nullptr) {
os << opcode << " {";
uint32_t method_idx = VRegB_35c();
for (size_t i = 0; i < VRegA_35c(); ++i) {
@@ -341,7 +352,8 @@ std::string Instruction::DumpString(const DexFile* file) const {
}
os << "}, // vtable@" << method_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
default:
os << opcode << " {v" << arg[0] << ", v" << arg[1] << ", v" << arg[2]
<< ", v" << arg[3] << ", v" << arg[4] << "}, thing@" << VRegB_35c();
@@ -356,19 +368,21 @@ std::string Instruction::DumpString(const DexFile* file) const {
case INVOKE_DIRECT_RANGE:
case INVOKE_STATIC_RANGE:
case INVOKE_INTERFACE_RANGE:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t method_idx = VRegB_3rc();
os << StringPrintf("%s, {v%d .. v%d}, ", opcode, VRegC_3rc(), (VRegC_3rc() + VRegA_3rc() - 1))
<< PrettyMethod(method_idx, *file) << " // method@" << method_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
case INVOKE_VIRTUAL_RANGE_QUICK:
- if (file != NULL) {
+ if (file != nullptr) {
uint32_t method_idx = VRegB_3rc();
os << StringPrintf("%s, {v%d .. v%d}, ", opcode, VRegC_3rc(), (VRegC_3rc() + VRegA_3rc() - 1))
<< "// vtable@" << method_idx;
break;
- } // else fall-through
+ }
+ FALLTHROUGH_INTENDED;
default:
os << StringPrintf("%s, {v%d .. v%d}, thing@%d", opcode, VRegC_3rc(),
(VRegC_3rc() + VRegA_3rc() - 1), VRegB_3rc());
diff --git a/runtime/entrypoints/quick/quick_math_entrypoints.cc b/runtime/entrypoints/quick/quick_math_entrypoints.cc
index 014aad3b90..1c658b7057 100644
--- a/runtime/entrypoints/quick/quick_math_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_math_entrypoints.cc
@@ -18,6 +18,11 @@
namespace art {
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wfloat-equal"
+#endif
+
int CmplFloat(float a, float b) {
if (a == b) {
return 0;
@@ -62,6 +67,10 @@ int CmplDouble(double a, double b) {
return -1;
}
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
extern "C" int64_t artLmul(int64_t a, int64_t b) {
return a * b;
}
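The push/ignored/pop pragmas above are the usual clang idiom for silencing a diagnostic over a bounded region. A standalone sketch of the same pattern (illustrative only; unlike the real Cmpl/Cmpg entrypoints it does not pick a dedicated result for NaN):

    #if defined(__clang__)
    #pragma clang diagnostic push
    #pragma clang diagnostic ignored "-Wfloat-equal"
    #endif

    // Exact floating-point equality is intentional in a three-way compare.
    static int CompareExactly(double a, double b) {
      if (a == b) {
        return 0;
      }
      return a < b ? -1 : 1;
    }

    #if defined(__clang__)
    #pragma clang diagnostic pop
    #endif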
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 054dd4698d..96903db414 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1275,8 +1275,8 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
// is at *m = sp. Will update to point to the bottom of the save frame.
//
// Note: assumes ComputeAll() has been run before.
- void LayoutCalleeSaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
- uint32_t* handle_scope_entries)
+ void LayoutCalleeSaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
+ HandleScope** handle_scope)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = (*m)->AsMirrorPtr();
@@ -1289,8 +1289,6 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
sp8 += sizeof(void*); // In the callee-save frame we use a full pointer.
// Under the callee saves put handle scope and new method stack reference.
- *handle_scope_entries = num_handle_scope_references_;
-
size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
size_t scope_and_method = handle_scope_size + sizeof(StackReference<mirror::ArtMethod>);
@@ -1300,8 +1298,8 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
uint8_t* sp8_table = sp8 + sizeof(StackReference<mirror::ArtMethod>);
- *table = reinterpret_cast<HandleScope*>(sp8_table);
- (*table)->SetNumberOfReferences(num_handle_scope_references_);
+ *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
+ num_handle_scope_references_);
// Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
uint8_t* method_pointer = sp8;
@@ -1319,12 +1317,12 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
// Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
// Returns the new bottom. Note: this may be unaligned.
- uint8_t* LayoutJNISaveFrame(StackReference<mirror::ArtMethod>** m, void* sp, HandleScope** table,
- uint32_t* handle_scope_entries)
+ uint8_t* LayoutJNISaveFrame(Thread* self, StackReference<mirror::ArtMethod>** m, void* sp,
+ HandleScope** handle_scope)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// First, fix up the layout of the callee-save frame.
// We have to squeeze in the HandleScope, and relocate the method pointer.
- LayoutCalleeSaveFrame(m, sp, table, handle_scope_entries);
+ LayoutCalleeSaveFrame(self, m, sp, handle_scope);
// The bottom of the callee-save frame is now where the method is, *m.
uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
@@ -1336,14 +1334,15 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
}
// WARNING: After this, *sp won't be pointing to the method anymore!
- uint8_t* ComputeLayout(StackReference<mirror::ArtMethod>** m, bool is_static, const char* shorty,
- uint32_t shorty_len, HandleScope** table, uint32_t* handle_scope_entries,
+ uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m,
+ bool is_static, const char* shorty, uint32_t shorty_len,
+ HandleScope** handle_scope,
uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
// JNI part.
- uint8_t* sp8 = LayoutJNISaveFrame(m, reinterpret_cast<void*>(*m), table, handle_scope_entries);
+ uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);
sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
@@ -1426,20 +1425,19 @@ class FillNativeCall {
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildGenericJniFrameVisitor(StackReference<mirror::ArtMethod>** sp, bool is_static,
- const char* shorty, uint32_t shorty_len, Thread* self)
+ BuildGenericJniFrameVisitor(Thread* self, bool is_static, const char* shorty, uint32_t shorty_len,
+ StackReference<mirror::ArtMethod>** sp)
: QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
jni_call_(nullptr, nullptr, nullptr, nullptr), sm_(&jni_call_) {
ComputeGenericJniFrameSize fsc;
uintptr_t* start_gpr_reg;
uint32_t* start_fpr_reg;
uintptr_t* start_stack_arg;
- uint32_t handle_scope_entries;
- bottom_of_used_area_ = fsc.ComputeLayout(sp, is_static, shorty, shorty_len, &handle_scope_,
- &handle_scope_entries, &start_stack_arg,
+ bottom_of_used_area_ = fsc.ComputeLayout(self, sp, is_static, shorty, shorty_len,
+ &handle_scope_,
+ &start_stack_arg,
&start_gpr_reg, &start_fpr_reg);
- handle_scope_->SetNumberOfReferences(handle_scope_entries);
jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
// jni environment is always first argument
@@ -1611,7 +1609,7 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self,
const char* shorty = called->GetShorty(&shorty_len);
// Run the visitor.
- BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), shorty, shorty_len, self);
+ BuildGenericJniFrameVisitor visitor(self, called->IsStatic(), shorty, shorty_len, &sp);
visitor.VisitArguments();
visitor.FinalizeHandleScope(self);
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a3408cf0fa..0cea89dc17 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -507,13 +507,12 @@ size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
--pm_idx;
DCHECK_LT(pm_idx, capacity_ / kPageSize);
} while (page_map_[pm_idx] != kPageMapRun);
- // Fall-through.
+ FALLTHROUGH_INTENDED;
case kPageMapRun:
run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
DCHECK_EQ(run->magic_num_, kMagicNum);
break;
case kPageMapReleased:
- // Fall-through.
case kPageMapEmpty:
LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
return 0;
@@ -2138,7 +2137,7 @@ size_t RosAlloc::ReleasePages() {
break;
}
}
- // Fall through.
+ FALLTHROUGH_INTENDED;
}
case kPageMapLargeObject: // Fall through.
case kPageMapLargeObjectPart: // Fall through.
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 8374ff70b2..ad7f901181 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -285,7 +285,7 @@ class RosAlloc {
// Returns the byte size of the bracket size from the index.
static size_t IndexToBracketSize(size_t idx) {
- DCHECK(idx < kNumOfSizeBrackets);
+ DCHECK_LT(idx, kNumOfSizeBrackets);
return bracketSizes[idx];
}
// Returns the index of the size bracket from the bracket size.
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 7bc811db87..2717180b4d 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -26,9 +26,10 @@ namespace art {
template<size_t kNumReferences>
inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self)
- : HandleScope(kNumReferences), self_(self), pos_(0) {
+ : HandleScope(self->GetTopHandleScope(), kNumReferences), self_(self), pos_(0) {
+ COMPILE_ASSERT(kNumReferences >= 1, stack_handle_scope_must_contain_at_least_1_reference);
// TODO: Figure out how to use a compile assert.
- DCHECK_EQ(&references_[0], &references_storage_[0]);
+ CHECK_EQ(&storage_[0], GetReferences());
for (size_t i = 0; i < kNumReferences; ++i) {
SetReference(i, nullptr);
}
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 99059f9e59..f795e387f0 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -66,39 +66,28 @@ class PACKED(4) HandleScope {
return link_;
}
- void SetLink(HandleScope* link) {
- DCHECK_NE(this, link);
- link_ = link;
- }
-
- // Sets the number_of_references_ field for constructing tables out of raw memory. Warning: will
- // not resize anything.
- void SetNumberOfReferences(uint32_t num_references) {
- number_of_references_ = num_references;
- }
-
- mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
+ ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_LT(i, number_of_references_);
- return references_[i].AsMirrorPtr();
+ return GetReferences()[i].AsMirrorPtr();
}
- Handle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
+ ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_LT(i, number_of_references_);
- return Handle<mirror::Object>(&references_[i]);
+ return Handle<mirror::Object>(&GetReferences()[i]);
}
- MutableHandle<mirror::Object> GetMutableHandle(size_t i)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
+ ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_LT(i, number_of_references_);
- return MutableHandle<mirror::Object>(&references_[i]);
+ return MutableHandle<mirror::Object>(&GetReferences()[i]);
}
- void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
+ ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_LT(i, number_of_references_);
- references_[i].Assign(object);
+ GetReferences()[i].Assign(object);
}
bool Contains(StackReference<mirror::Object>* handle_scope_entry) const {
@@ -106,39 +95,53 @@ class PACKED(4) HandleScope {
// jni_compiler should have a jobject/jclass as a native method is
// passed in a this pointer or a class
DCHECK_GT(number_of_references_, 0U);
- return &references_[0] <= handle_scope_entry &&
- handle_scope_entry <= &references_[number_of_references_ - 1];
+ return &GetReferences()[0] <= handle_scope_entry &&
+ handle_scope_entry <= &GetReferences()[number_of_references_ - 1];
}
- // Offset of link within HandleScope, used by generated code
+ // Offset of link within HandleScope, used by generated code.
static size_t LinkOffset(size_t pointer_size) {
return 0;
}
- // Offset of length within handle scope, used by generated code
+ // Offset of length within handle scope, used by generated code.
static size_t NumberOfReferencesOffset(size_t pointer_size) {
return pointer_size;
}
- // Offset of link within handle scope, used by generated code
+ // Offset of references within handle scope, used by generated code.
static size_t ReferencesOffset(size_t pointer_size) {
return pointer_size + sizeof(number_of_references_);
}
+ // Placement new creation.
+ static HandleScope* Create(void* storage, HandleScope* link, uint32_t num_references)
+ WARN_UNUSED {
+ return new (storage) HandleScope(link, num_references);
+ }
+
protected:
- explicit HandleScope(size_t number_of_references) :
- link_(nullptr), number_of_references_(number_of_references) {
+ // Return backing storage used for references.
+ ALWAYS_INLINE StackReference<mirror::Object>* GetReferences() const {
+ uintptr_t address = reinterpret_cast<uintptr_t>(this) + ReferencesOffset(sizeof(void*));
+ return reinterpret_cast<StackReference<mirror::Object>*>(address);
}
- HandleScope* link_;
- uint32_t number_of_references_;
+ // Semi-hidden constructor. Construction expected by generated code and StackHandleScope.
+ explicit HandleScope(HandleScope* link, uint32_t num_references) :
+ link_(link), number_of_references_(num_references) {
+ }
- // number_of_references_ are available if this is allocated and filled in by jni_compiler.
- StackReference<mirror::Object> references_[0];
+ // Linked list of handle scopes. The root is held by a Thread.
+ HandleScope* const link_;
- private:
- template<size_t kNumReferences> friend class StackHandleScope;
+ // Number of handlerized references.
+ const uint32_t number_of_references_;
+
+ // Storage for references.
+ // StackReference<mirror::Object> references_[number_of_references_]
+ private:
DISALLOW_COPY_AND_ASSIGN(HandleScope);
};
@@ -169,22 +172,22 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
// Currently unused, using this GetReference instead of the one in HandleScope is preferred to
// avoid compiler optimizations incorrectly optimizing out of bound array accesses.
// TODO: Remove this when it is un-necessary.
- mirror::Object* GetReference(size_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
- DCHECK_LT(i, number_of_references_);
- return references_storage_[i].AsMirrorPtr();
+ ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_LT(i, kNumReferences);
+ return GetReferences()[i].AsMirrorPtr();
}
- MutableHandle<mirror::Object> GetHandle(size_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
- DCHECK_LT(i, number_of_references_);
- return MutableHandle<mirror::Object>(&references_storage_[i]);
+ ALWAYS_INLINE MutableHandle<mirror::Object> GetHandle(size_t i)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_LT(i, kNumReferences);
+ return MutableHandle<mirror::Object>(&GetReferences()[i]);
}
- void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- ALWAYS_INLINE {
- DCHECK_LT(i, number_of_references_);
- references_storage_[i].Assign(object);
+ ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_LT(i, kNumReferences);
+ GetReferences()[i].Assign(object);
}
template<class T>
@@ -204,9 +207,8 @@ class PACKED(4) StackHandleScope FINAL : public HandleScope {
}
private:
- // References_storage_ needs to be first so that it appears in the same location as
- // HandleScope::references_.
- StackReference<mirror::Object> references_storage_[kNumReferences];
+ // Reference storage needs to be first as expected by the HandleScope layout.
+ StackReference<mirror::Object> storage_[kNumReferences];
// The thread that the stack handle scope is a linked list upon. The stack handle scope will
// push and pop itself from this thread.
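After this change, HandleScope is a variable-sized object: a fixed header (link, count) followed immediately by the reference slots, located via GetReferences() at a computed offset rather than through a flexible array member, and constructed either by placement new (Create) or as the base of StackHandleScope, whose storage_ array lands at ReferencesOffset. A simplified, self-contained sketch of that idiom (hypothetical names and types, not the runtime's):

    #include <cstdint>
    #include <new>

    struct ScopeHeader {
      ScopeHeader* link;
      uint32_t count;

      // Slots live directly after the header; compute their address from 'this'.
      uint32_t* Slots() {
        return reinterpret_cast<uint32_t*>(reinterpret_cast<uint8_t*>(this) + sizeof(ScopeHeader));
      }

      // Placement-new into caller-provided memory sized (and aligned) for the
      // header plus 'count' slots.
      static ScopeHeader* Create(void* storage, ScopeHeader* link, uint32_t count) {
        return new (storage) ScopeHeader{link, count};
      }
    };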
diff --git a/runtime/handle_scope_test.cc b/runtime/handle_scope_test.cc
index 7afd279942..dc999877d0 100644
--- a/runtime/handle_scope_test.cc
+++ b/runtime/handle_scope_test.cc
@@ -25,7 +25,7 @@ namespace art {
template<size_t kNumReferences>
class NoThreadStackHandleScope : public HandleScope {
public:
- explicit NoThreadStackHandleScope() : HandleScope(kNumReferences) {
+ explicit NoThreadStackHandleScope(HandleScope* link) : HandleScope(link, kNumReferences) {
}
~NoThreadStackHandleScope() {
}
@@ -41,10 +41,8 @@ class NoThreadStackHandleScope : public HandleScope {
TEST(HandleScopeTest, Offsets) NO_THREAD_SAFETY_ANALYSIS {
// As the members of HandleScope are private, we cannot use OFFSETOF_MEMBER
// here. So do the inverse: set some data, and access it through pointers created from the offsets.
- NoThreadStackHandleScope<1> test_table;
+ NoThreadStackHandleScope<0x9ABC> test_table(reinterpret_cast<HandleScope*>(0x5678));
test_table.SetReference(0, reinterpret_cast<mirror::Object*>(0x1234));
- test_table.SetLink(reinterpret_cast<HandleScope*>(0x5678));
- test_table.SetNumberOfReferences(0x9ABC);
uint8_t* table_base_ptr = reinterpret_cast<uint8_t*>(&test_table);
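The offsets test above works "inside out": because the members are private, it writes through the typed API and then reads raw memory back at the offsets that generated code uses. The same round-trip in a self-contained form (hypothetical two-field layout, not HandleScope itself):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Packed {
      void* link;
      uint32_t count;
      static size_t CountOffset() { return sizeof(void*); }  // count follows the link pointer
    };

    int main() {
      Packed p{reinterpret_cast<void*>(0x5678), 0x9ABC};
      uint8_t* base = reinterpret_cast<uint8_t*>(&p);
      uint32_t read_back = *reinterpret_cast<uint32_t*>(base + Packed::CountOffset());
      assert(read_back == 0x9ABCu);
      return 0;
    }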
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index fd67197986..a2d37b3def 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -719,9 +719,9 @@ static HprofBasicType SignatureToBasicTypeAndSize(const char* sig, size_t* sizeO
case 'D': ret = hprof_basic_double; size = 8; break;
case 'B': ret = hprof_basic_byte; size = 1; break;
case 'S': ret = hprof_basic_short; size = 2; break;
- default: CHECK(false);
case 'I': ret = hprof_basic_int; size = 4; break;
case 'J': ret = hprof_basic_long; size = 8; break;
+ default: LOG(FATAL) << "UNREACHABLE"; UNREACHABLE();
}
if (sizeOut != NULL) {
@@ -742,9 +742,9 @@ static HprofBasicType PrimitiveToBasicTypeAndSize(Primitive::Type prim, size_t*
case Primitive::kPrimDouble: ret = hprof_basic_double; size = 8; break;
case Primitive::kPrimByte: ret = hprof_basic_byte; size = 1; break;
case Primitive::kPrimShort: ret = hprof_basic_short; size = 2; break;
- default: CHECK(false);
case Primitive::kPrimInt: ret = hprof_basic_int; size = 4; break;
case Primitive::kPrimLong: ret = hprof_basic_long; size = 8; break;
+ default: LOG(FATAL) << "UNREACHABLE"; UNREACHABLE();
}
if (sizeOut != NULL) {
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index db7c452819..88d6544e80 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -662,6 +662,11 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
}
HANDLE_INSTRUCTION_END();
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wfloat-equal"
+#endif
+
HANDLE_INSTRUCTION_START(CMPL_FLOAT) {
float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
@@ -726,6 +731,10 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
}
HANDLE_INSTRUCTION_END();
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
HANDLE_INSTRUCTION_START(CMP_LONG) {
int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index fe0af27f00..14e8a522eb 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -562,6 +562,12 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
inst = inst->RelativeAt(offset);
break;
}
+
+#if defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wfloat-equal"
+#endif
+
case Instruction::CMPL_FLOAT: {
PREAMBLE();
float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
@@ -627,6 +633,11 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
inst = inst->Next_2xx();
break;
}
+
+#if defined(__clang__)
+#pragma clang diagnostic pop
+#endif
+
case Instruction::CMP_LONG: {
PREAMBLE();
int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 7c8c63ce46..d1229b28a8 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -181,8 +181,14 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
for (int i = 0; i < pEvent->modCount; i++) {
const JdwpEventMod* pMod = &pEvent->mods[i];
if (pMod->modKind == MK_LOCATION_ONLY) {
- /* should only be for Breakpoint, Step, and Exception */
- Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
+ // Should only concern breakpoint, field access, field modification, step, and exception
+ // events.
+ // However, only breakpoints require specific handling here: field access, field modification
+ // and step events need full deoptimization to be reported, while exception events are reported
+ // during exception handling.
+ if (pEvent->eventKind == EK_BREAKPOINT) {
+ Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
+ }
} else if (pMod->modKind == MK_STEP) {
/* should only be for EK_SINGLE_STEP; should only be one */
JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
@@ -258,8 +264,10 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
for (int i = 0; i < pEvent->modCount; i++) {
JdwpEventMod* pMod = &pEvent->mods[i];
if (pMod->modKind == MK_LOCATION_ONLY) {
- /* should only be for Breakpoint, Step, and Exception */
- Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+ // Like in RegisterEvent, we need specific handling for breakpoint only.
+ if (pEvent->eventKind == EK_BREAKPOINT) {
+ Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
+ }
}
if (pMod->modKind == MK_STEP) {
/* should only be for EK_SINGLE_STEP; should only be one */
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 20d031c3e5..cab907c378 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -1527,14 +1527,14 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
-#define EXPECT_STATIC_PRIMITIVE_FIELD(type, field_name, sig, value1, value2) \
+#define EXPECT_STATIC_PRIMITIVE_FIELD(expect_eq, type, field_name, sig, value1, value2) \
do { \
jfieldID fid = env_->GetStaticFieldID(c, field_name, sig); \
EXPECT_NE(fid, nullptr); \
env_->SetStatic ## type ## Field(c, fid, value1); \
- EXPECT_EQ(value1, env_->GetStatic ## type ## Field(c, fid)); \
+ expect_eq(value1, env_->GetStatic ## type ## Field(c, fid)); \
env_->SetStatic ## type ## Field(c, fid, value2); \
- EXPECT_EQ(value2, env_->GetStatic ## type ## Field(c, fid)); \
+ expect_eq(value2, env_->GetStatic ## type ## Field(c, fid)); \
\
bool old_check_jni = vm_->SetCheckJniEnabled(false); \
{ \
@@ -1560,14 +1560,14 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni)); \
} while (false)
-#define EXPECT_PRIMITIVE_FIELD(instance, type, field_name, sig, value1, value2) \
+#define EXPECT_PRIMITIVE_FIELD(expect_eq, instance, type, field_name, sig, value1, value2) \
do { \
jfieldID fid = env_->GetFieldID(c, field_name, sig); \
EXPECT_NE(fid, nullptr); \
env_->Set ## type ## Field(instance, fid, value1); \
- EXPECT_EQ(value1, env_->Get ## type ## Field(instance, fid)); \
+ expect_eq(value1, env_->Get ## type ## Field(instance, fid)); \
env_->Set ## type ## Field(instance, fid, value2); \
- EXPECT_EQ(value2, env_->Get ## type ## Field(instance, fid)); \
+ expect_eq(value2, env_->Get ## type ## Field(instance, fid)); \
\
bool old_check_jni = vm_->SetCheckJniEnabled(false); \
CheckJniAbortCatcher jni_abort_catcher; \
@@ -1604,23 +1604,23 @@ TEST_F(JniInternalTest, GetPrimitiveField_SetPrimitiveField) {
jobject o = env_->AllocObject(c);
ASSERT_NE(o, nullptr);
- EXPECT_STATIC_PRIMITIVE_FIELD(Boolean, "sZ", "Z", JNI_TRUE, JNI_FALSE);
- EXPECT_STATIC_PRIMITIVE_FIELD(Byte, "sB", "B", 1, 2);
- EXPECT_STATIC_PRIMITIVE_FIELD(Char, "sC", "C", 'a', 'b');
- EXPECT_STATIC_PRIMITIVE_FIELD(Double, "sD", "D", 1.0, 2.0);
- EXPECT_STATIC_PRIMITIVE_FIELD(Float, "sF", "F", 1.0, 2.0);
- EXPECT_STATIC_PRIMITIVE_FIELD(Int, "sI", "I", 1, 2);
- EXPECT_STATIC_PRIMITIVE_FIELD(Long, "sJ", "J", 1, 2);
- EXPECT_STATIC_PRIMITIVE_FIELD(Short, "sS", "S", 1, 2);
-
- EXPECT_PRIMITIVE_FIELD(o, Boolean, "iZ", "Z", JNI_TRUE, JNI_FALSE);
- EXPECT_PRIMITIVE_FIELD(o, Byte, "iB", "B", 1, 2);
- EXPECT_PRIMITIVE_FIELD(o, Char, "iC", "C", 'a', 'b');
- EXPECT_PRIMITIVE_FIELD(o, Double, "iD", "D", 1.0, 2.0);
- EXPECT_PRIMITIVE_FIELD(o, Float, "iF", "F", 1.0, 2.0);
- EXPECT_PRIMITIVE_FIELD(o, Int, "iI", "I", 1, 2);
- EXPECT_PRIMITIVE_FIELD(o, Long, "iJ", "J", 1, 2);
- EXPECT_PRIMITIVE_FIELD(o, Short, "iS", "S", 1, 2);
+ EXPECT_STATIC_PRIMITIVE_FIELD(EXPECT_EQ, Boolean, "sZ", "Z", JNI_TRUE, JNI_FALSE);
+ EXPECT_STATIC_PRIMITIVE_FIELD(EXPECT_EQ, Byte, "sB", "B", 1, 2);
+ EXPECT_STATIC_PRIMITIVE_FIELD(EXPECT_EQ, Char, "sC", "C", 'a', 'b');
+ EXPECT_STATIC_PRIMITIVE_FIELD(EXPECT_DOUBLE_EQ, Double, "sD", "D", 1.0, 2.0);
+ EXPECT_STATIC_PRIMITIVE_FIELD(EXPECT_FLOAT_EQ, Float, "sF", "F", 1.0, 2.0);
+ EXPECT_STATIC_PRIMITIVE_FIELD(EXPECT_EQ, Int, "sI", "I", 1, 2);
+ EXPECT_STATIC_PRIMITIVE_FIELD(EXPECT_EQ, Long, "sJ", "J", 1, 2);
+ EXPECT_STATIC_PRIMITIVE_FIELD(EXPECT_EQ, Short, "sS", "S", 1, 2);
+
+ EXPECT_PRIMITIVE_FIELD(EXPECT_EQ, o, Boolean, "iZ", "Z", JNI_TRUE, JNI_FALSE);
+ EXPECT_PRIMITIVE_FIELD(EXPECT_EQ, o, Byte, "iB", "B", 1, 2);
+ EXPECT_PRIMITIVE_FIELD(EXPECT_EQ, o, Char, "iC", "C", 'a', 'b');
+ EXPECT_PRIMITIVE_FIELD(EXPECT_DOUBLE_EQ, o, Double, "iD", "D", 1.0, 2.0);
+ EXPECT_PRIMITIVE_FIELD(EXPECT_FLOAT_EQ, o, Float, "iF", "F", 1.0, 2.0);
+ EXPECT_PRIMITIVE_FIELD(EXPECT_EQ, o, Int, "iI", "I", 1, 2);
+ EXPECT_PRIMITIVE_FIELD(EXPECT_EQ, o, Long, "iJ", "J", 1, 2);
+ EXPECT_PRIMITIVE_FIELD(EXPECT_EQ, o, Short, "iS", "S", 1, 2);
}
TEST_F(JniInternalTest, GetObjectField_SetObjectField) {
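Passing the comparison macro as a parameter lets the float and double fields use gtest's approximate checks: per the gtest documentation, EXPECT_FLOAT_EQ and EXPECT_DOUBLE_EQ treat values as equal when they are within about 4 ULPs, instead of requiring exact equality as EXPECT_EQ does. A small illustration:

    #include <gtest/gtest.h>

    TEST(FloatCompare, UlpTolerantEquality) {
      // In double, 0.1 + 0.2 differs from 0.3 by one ULP, so EXPECT_EQ(0.3, 0.1 + 0.2)
      // would fail; the ULP-tolerant macro accepts the rounding error.
      EXPECT_DOUBLE_EQ(0.3, 0.1 + 0.2);
      EXPECT_FLOAT_EQ(0.3f, 0.1f + 0.2f);
    }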
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 4ed96fca7e..1bbcf8ef1c 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -62,7 +62,7 @@ class Throwable;
static constexpr bool kCheckFieldAssignments = false;
// Size of Object.
-static constexpr uint32_t kObjectHeaderSize = kUseBrooksReadBarrier ? 16 : 8;
+static constexpr uint32_t kObjectHeaderSize = kUseBakerOrBrooksReadBarrier ? 16 : 8;
// C++ mirror of java.lang.Object
class MANAGED LOCKABLE Object {
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 1aeba7487c..a2a062617a 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -248,12 +248,6 @@ TEST_F(ObjectTest, PrimitiveArray_Byte_Alloc) {
TEST_F(ObjectTest, PrimitiveArray_Char_Alloc) {
TestPrimitiveArray<CharArray>(class_linker_);
}
-TEST_F(ObjectTest, PrimitiveArray_Double_Alloc) {
- TestPrimitiveArray<DoubleArray>(class_linker_);
-}
-TEST_F(ObjectTest, PrimitiveArray_Float_Alloc) {
- TestPrimitiveArray<FloatArray>(class_linker_);
-}
TEST_F(ObjectTest, PrimitiveArray_Int_Alloc) {
TestPrimitiveArray<IntArray>(class_linker_);
}
@@ -264,6 +258,67 @@ TEST_F(ObjectTest, PrimitiveArray_Short_Alloc) {
TestPrimitiveArray<ShortArray>(class_linker_);
}
+TEST_F(ObjectTest, PrimitiveArray_Double_Alloc) {
+ typedef DoubleArray ArrayT;
+ ScopedObjectAccess soa(Thread::Current());
+ typedef typename ArrayT::ElementType T;
+
+ ArrayT* a = ArrayT::Alloc(soa.Self(), 2);
+ EXPECT_EQ(2, a->GetLength());
+ EXPECT_DOUBLE_EQ(0, a->Get(0));
+ EXPECT_DOUBLE_EQ(0, a->Get(1));
+ a->Set(0, T(123));
+ EXPECT_DOUBLE_EQ(T(123), a->Get(0));
+ EXPECT_DOUBLE_EQ(0, a->Get(1));
+ a->Set(1, T(321));
+ EXPECT_DOUBLE_EQ(T(123), a->Get(0));
+ EXPECT_DOUBLE_EQ(T(321), a->Get(1));
+
+ Class* aioobe = class_linker_->FindSystemClass(soa.Self(),
+ "Ljava/lang/ArrayIndexOutOfBoundsException;");
+
+ EXPECT_DOUBLE_EQ(0, a->Get(-1));
+ EXPECT_TRUE(soa.Self()->IsExceptionPending());
+ EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ soa.Self()->ClearException();
+
+ EXPECT_DOUBLE_EQ(0, a->Get(2));
+ EXPECT_TRUE(soa.Self()->IsExceptionPending());
+ EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ soa.Self()->ClearException();
+}
+
+TEST_F(ObjectTest, PrimitiveArray_Float_Alloc) {
+ typedef FloatArray ArrayT;
+ ScopedObjectAccess soa(Thread::Current());
+ typedef typename ArrayT::ElementType T;
+
+ ArrayT* a = ArrayT::Alloc(soa.Self(), 2);
+ EXPECT_EQ(2, a->GetLength());
+ EXPECT_FLOAT_EQ(0, a->Get(0));
+ EXPECT_FLOAT_EQ(0, a->Get(1));
+ a->Set(0, T(123));
+ EXPECT_FLOAT_EQ(T(123), a->Get(0));
+ EXPECT_FLOAT_EQ(0, a->Get(1));
+ a->Set(1, T(321));
+ EXPECT_FLOAT_EQ(T(123), a->Get(0));
+ EXPECT_FLOAT_EQ(T(321), a->Get(1));
+
+ Class* aioobe = class_linker_->FindSystemClass(soa.Self(),
+ "Ljava/lang/ArrayIndexOutOfBoundsException;");
+
+ EXPECT_FLOAT_EQ(0, a->Get(-1));
+ EXPECT_TRUE(soa.Self()->IsExceptionPending());
+ EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ soa.Self()->ClearException();
+
+ EXPECT_FLOAT_EQ(0, a->Get(2));
+ EXPECT_TRUE(soa.Self()->IsExceptionPending());
+ EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ soa.Self()->ClearException();
+}
+
+
TEST_F(ObjectTest, CheckAndAllocArrayFromCode) {
// pretend we are trying to call 'new char[3]' from String.toCharArray
ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index a85eec7464..f9a1cee2d8 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -37,7 +37,7 @@ static void Runtime_gc(JNIEnv*, jclass) {
Runtime::Current()->GetHeap()->CollectGarbage(false);
}
-static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
+[[noreturn]] static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
LOG(INFO) << "System.exit called, status: " << status;
Runtime::Current()->CallExitHook(status);
exit(status);
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index d166be030a..7f5a611d83 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -268,6 +268,7 @@ static void SetFieldValue(ScopedFastNativeObjectAccess& soa, mirror::Object* o,
break;
}
// Else fall through to report an error.
+ FALLTHROUGH_INTENDED;
case Primitive::kPrimVoid:
// Never okay.
ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 5154d69292..61481b16e6 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -64,7 +64,7 @@ TEST_F(ParsedOptionsTest, ParsedOptions) {
EXPECT_EQ(2048U, parsed->heap_initial_size_);
EXPECT_EQ(4 * KB, parsed->heap_maximum_size_);
EXPECT_EQ(1 * MB, parsed->stack_size_);
- EXPECT_EQ(0.75, parsed->heap_target_utilization_);
+ EXPECT_DOUBLE_EQ(0.75, parsed->heap_target_utilization_);
EXPECT_TRUE(test_vfprintf == parsed->hook_vfprintf_);
EXPECT_TRUE(test_exit == parsed->hook_exit_);
EXPECT_TRUE(test_abort == parsed->hook_abort_);
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 75211e00d7..f8e0f47130 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -193,19 +193,19 @@ class ReflectionTest : public CommonCompilerTest {
args[0].d = 0.0;
JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(0.0, result.GetD());
+ EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = -1.0;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(-1.0, result.GetD());
+ EXPECT_DOUBLE_EQ(-1.0, result.GetD());
args[0].d = DBL_MAX;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(DBL_MAX, result.GetD());
+ EXPECT_DOUBLE_EQ(DBL_MAX, result.GetD());
args[0].d = DBL_MIN;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(DBL_MIN, result.GetD());
+ EXPECT_DOUBLE_EQ(DBL_MIN, result.GetD());
}
void InvokeSumIntIntMethod(bool is_static) {
@@ -375,27 +375,27 @@ class ReflectionTest : public CommonCompilerTest {
args[0].d = 0.0;
args[1].d = 0.0;
JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(0.0, result.GetD());
+ EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(3.0, result.GetD());
+ EXPECT_DOUBLE_EQ(3.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(-1.0, result.GetD());
+ EXPECT_DOUBLE_EQ(-1.0, result.GetD());
args[0].d = DBL_MAX;
args[1].d = DBL_MIN;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(1.7976931348623157e308, result.GetD());
+ EXPECT_DOUBLE_EQ(1.7976931348623157e308, result.GetD());
args[0].d = DBL_MAX;
args[1].d = DBL_MAX;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(INFINITY, result.GetD());
+ EXPECT_DOUBLE_EQ(INFINITY, result.GetD());
}
void InvokeSumDoubleDoubleDoubleMethod(bool is_static) {
@@ -409,19 +409,19 @@ class ReflectionTest : public CommonCompilerTest {
args[1].d = 0.0;
args[2].d = 0.0;
JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(0.0, result.GetD());
+ EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
args[2].d = 3.0;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(6.0, result.GetD());
+ EXPECT_DOUBLE_EQ(6.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
args[2].d = 3.0;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(2.0, result.GetD());
+ EXPECT_DOUBLE_EQ(2.0, result.GetD());
}
void InvokeSumDoubleDoubleDoubleDoubleMethod(bool is_static) {
@@ -436,21 +436,21 @@ class ReflectionTest : public CommonCompilerTest {
args[2].d = 0.0;
args[3].d = 0.0;
JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(0.0, result.GetD());
+ EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
args[2].d = 3.0;
args[3].d = 4.0;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(10.0, result.GetD());
+ EXPECT_DOUBLE_EQ(10.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
args[2].d = 3.0;
args[3].d = -4.0;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(-2.0, result.GetD());
+ EXPECT_DOUBLE_EQ(-2.0, result.GetD());
}
void InvokeSumDoubleDoubleDoubleDoubleDoubleMethod(bool is_static) {
@@ -466,7 +466,7 @@ class ReflectionTest : public CommonCompilerTest {
args[3].d = 0.0;
args[4].d = 0.0;
JValue result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(0.0, result.GetD());
+ EXPECT_DOUBLE_EQ(0.0, result.GetD());
args[0].d = 1.0;
args[1].d = 2.0;
@@ -474,7 +474,7 @@ class ReflectionTest : public CommonCompilerTest {
args[3].d = 4.0;
args[4].d = 5.0;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(15.0, result.GetD());
+ EXPECT_DOUBLE_EQ(15.0, result.GetD());
args[0].d = 1.0;
args[1].d = -2.0;
@@ -482,7 +482,7 @@ class ReflectionTest : public CommonCompilerTest {
args[3].d = -4.0;
args[4].d = 5.0;
result = InvokeWithJValues(soa, receiver, soa.EncodeMethod(method), args);
- EXPECT_EQ(3.0, result.GetD());
+ EXPECT_DOUBLE_EQ(3.0, result.GetD());
}
JavaVMExt* vm_;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 1a6c6e0c54..7bffc335ef 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -177,10 +177,7 @@ class Runtime {
// Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
// callers should prefer.
- // This isn't marked ((noreturn)) because then gcc will merge multiple calls
- // in a single function together. This reduces code size slightly, but means
- // that the native stack trace we get may point at the wrong call site.
- static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);
+ [[noreturn]] static void Abort() LOCKS_EXCLUDED(Locks::abort_lock_);
// Returns the "main" ThreadGroup, used when attaching user threads.
jobject GetMainThreadGroup() const;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index b0c8fe1b3a..fd37703949 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -947,10 +947,36 @@ static bool ShouldShowNativeStack(const Thread* thread)
}
void Thread::DumpJavaStack(std::ostream& os) const {
+ // Dumping the Java stack involves the verifier for locks. The verifier operates under the
+ // assumption that there is no exception pending on entry. Thus, stash any pending exception.
+ // TODO: Find a way to avoid const_cast.
+ StackHandleScope<3> scope(const_cast<Thread*>(this));
+ Handle<mirror::Throwable> exc;
+ Handle<mirror::Object> throw_location_this_object;
+ Handle<mirror::ArtMethod> throw_location_method;
+ uint32_t throw_location_dex_pc;
+ bool have_exception = false;
+ if (IsExceptionPending()) {
+ ThrowLocation exc_location;
+ exc = scope.NewHandle(GetException(&exc_location));
+ throw_location_this_object = scope.NewHandle(exc_location.GetThis());
+ throw_location_method = scope.NewHandle(exc_location.GetMethod());
+ throw_location_dex_pc = exc_location.GetDexPc();
+ const_cast<Thread*>(this)->ClearException();
+ have_exception = true;
+ }
+
std::unique_ptr<Context> context(Context::Create());
StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
!tls32_.throwing_OutOfMemoryError);
dumper.WalkStack();
+
+ if (have_exception) {
+ ThrowLocation exc_location(throw_location_this_object.Get(),
+ throw_location_method.Get(),
+ throw_location_dex_pc);
+ const_cast<Thread*>(this)->SetException(exc_location, exc.Get());
+ }
}
void Thread::DumpStack(std::ostream& os) const {
diff --git a/runtime/thread.h b/runtime/thread.h
index 998e47275c..b0be841730 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -692,7 +692,7 @@ class Thread {
}
void PushHandleScope(HandleScope* handle_scope) {
- handle_scope->SetLink(tlsPtr_.top_handle_scope);
+ DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
tlsPtr_.top_handle_scope = handle_scope;
}
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index a14889c852..8c4b90d347 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -147,12 +147,12 @@ TEST_F(TransactionTest, StaticFieldsTest) {
mirror::ArtField* floatField = h_klass->FindDeclaredStaticField("floatField", "F");
ASSERT_TRUE(floatField != nullptr);
ASSERT_EQ(floatField->GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
- ASSERT_EQ(floatField->GetFloat(h_klass.Get()), static_cast<float>(0.0f));
+ ASSERT_FLOAT_EQ(floatField->GetFloat(h_klass.Get()), static_cast<float>(0.0f));
mirror::ArtField* doubleField = h_klass->FindDeclaredStaticField("doubleField", "D");
ASSERT_TRUE(doubleField != nullptr);
ASSERT_EQ(doubleField->GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
- ASSERT_EQ(doubleField->GetDouble(h_klass.Get()), static_cast<double>(0.0));
+ ASSERT_DOUBLE_EQ(doubleField->GetDouble(h_klass.Get()), static_cast<double>(0.0));
mirror::ArtField* objectField = h_klass->FindDeclaredStaticField("objectField",
"Ljava/lang/Object;");
@@ -190,8 +190,8 @@ TEST_F(TransactionTest, StaticFieldsTest) {
EXPECT_EQ(shortField->GetShort(h_klass.Get()), 0);
EXPECT_EQ(intField->GetInt(h_klass.Get()), 0);
EXPECT_EQ(longField->GetLong(h_klass.Get()), static_cast<int64_t>(0));
- EXPECT_EQ(floatField->GetFloat(h_klass.Get()), static_cast<float>(0.0f));
- EXPECT_EQ(doubleField->GetDouble(h_klass.Get()), static_cast<double>(0.0));
+ EXPECT_FLOAT_EQ(floatField->GetFloat(h_klass.Get()), static_cast<float>(0.0f));
+ EXPECT_DOUBLE_EQ(doubleField->GetDouble(h_klass.Get()), static_cast<double>(0.0));
EXPECT_EQ(objectField->GetObject(h_klass.Get()), nullptr);
}
@@ -246,12 +246,12 @@ TEST_F(TransactionTest, InstanceFieldsTest) {
mirror::ArtField* floatField = h_klass->FindDeclaredInstanceField("floatField", "F");
ASSERT_TRUE(floatField != nullptr);
ASSERT_EQ(floatField->GetTypeAsPrimitiveType(), Primitive::kPrimFloat);
- ASSERT_EQ(floatField->GetFloat(h_instance.Get()), static_cast<float>(0.0f));
+ ASSERT_FLOAT_EQ(floatField->GetFloat(h_instance.Get()), static_cast<float>(0.0f));
mirror::ArtField* doubleField = h_klass->FindDeclaredInstanceField("doubleField", "D");
ASSERT_TRUE(doubleField != nullptr);
ASSERT_EQ(doubleField->GetTypeAsPrimitiveType(), Primitive::kPrimDouble);
- ASSERT_EQ(doubleField->GetDouble(h_instance.Get()), static_cast<double>(0.0));
+ ASSERT_DOUBLE_EQ(doubleField->GetDouble(h_instance.Get()), static_cast<double>(0.0));
mirror::ArtField* objectField = h_klass->FindDeclaredInstanceField("objectField",
"Ljava/lang/Object;");
@@ -289,8 +289,8 @@ TEST_F(TransactionTest, InstanceFieldsTest) {
EXPECT_EQ(shortField->GetShort(h_instance.Get()), 0);
EXPECT_EQ(intField->GetInt(h_instance.Get()), 0);
EXPECT_EQ(longField->GetLong(h_instance.Get()), static_cast<int64_t>(0));
- EXPECT_EQ(floatField->GetFloat(h_instance.Get()), static_cast<float>(0.0f));
- EXPECT_EQ(doubleField->GetDouble(h_instance.Get()), static_cast<double>(0.0));
+ EXPECT_FLOAT_EQ(floatField->GetFloat(h_instance.Get()), static_cast<float>(0.0f));
+ EXPECT_DOUBLE_EQ(doubleField->GetDouble(h_instance.Get()), static_cast<double>(0.0));
EXPECT_EQ(objectField->GetObject(h_instance.Get()), nullptr);
}
@@ -356,14 +356,14 @@ TEST_F(TransactionTest, StaticArrayFieldsTest) {
mirror::FloatArray* floatArray = floatArrayField->GetObject(h_klass.Get())->AsFloatArray();
ASSERT_TRUE(floatArray != nullptr);
ASSERT_EQ(floatArray->GetLength(), 1);
- ASSERT_EQ(floatArray->GetWithoutChecks(0), static_cast<float>(0.0f));
+ ASSERT_FLOAT_EQ(floatArray->GetWithoutChecks(0), static_cast<float>(0.0f));
mirror::ArtField* doubleArrayField = h_klass->FindDeclaredStaticField("doubleArrayField", "[D");
ASSERT_TRUE(doubleArrayField != nullptr);
mirror::DoubleArray* doubleArray = doubleArrayField->GetObject(h_klass.Get())->AsDoubleArray();
ASSERT_TRUE(doubleArray != nullptr);
ASSERT_EQ(doubleArray->GetLength(), 1);
- ASSERT_EQ(doubleArray->GetWithoutChecks(0), static_cast<double>(0.0f));
+ ASSERT_DOUBLE_EQ(doubleArray->GetWithoutChecks(0), static_cast<double>(0.0f));
mirror::ArtField* objectArrayField = h_klass->FindDeclaredStaticField("objectArrayField",
"[Ljava/lang/Object;");
@@ -404,8 +404,8 @@ TEST_F(TransactionTest, StaticArrayFieldsTest) {
EXPECT_EQ(shortArray->GetWithoutChecks(0), 0);
EXPECT_EQ(intArray->GetWithoutChecks(0), 0);
EXPECT_EQ(longArray->GetWithoutChecks(0), static_cast<int64_t>(0));
- EXPECT_EQ(floatArray->GetWithoutChecks(0), static_cast<float>(0.0f));
- EXPECT_EQ(doubleArray->GetWithoutChecks(0), static_cast<double>(0.0f));
+ EXPECT_FLOAT_EQ(floatArray->GetWithoutChecks(0), static_cast<float>(0.0f));
+ EXPECT_DOUBLE_EQ(doubleArray->GetWithoutChecks(0), static_cast<double>(0.0f));
EXPECT_EQ(objectArray->GetWithoutChecks(0), nullptr);
}
diff --git a/test/083-compiler-regressions/expected.txt b/test/083-compiler-regressions/expected.txt
index e907fd1d58..c43d1f78cb 100644
--- a/test/083-compiler-regressions/expected.txt
+++ b/test/083-compiler-regressions/expected.txt
@@ -16,6 +16,7 @@ false
b13679511Test finishing
b16177324TestWrapper caught NPE as expected.
b16230771TestWrapper caught NPE as expected.
+b17969907TestWrapper caught NPE as expected.
largeFrame passes
largeFrameFloat passes
mulBy1Test passes
diff --git a/test/083-compiler-regressions/src/Main.java b/test/083-compiler-regressions/src/Main.java
index 8d7bf01192..9c772b9fb0 100644
--- a/test/083-compiler-regressions/src/Main.java
+++ b/test/083-compiler-regressions/src/Main.java
@@ -38,6 +38,7 @@ public class Main {
b13679511Test();
b16177324TestWrapper();
b16230771TestWrapper();
+ b17969907TestWrapper();
largeFrameTest();
largeFrameTestFloat();
mulBy1Test();
@@ -977,6 +978,24 @@ public class Main {
}
}
+ static void b17969907TestWrapper() {
+ try {
+ b17969907Test();
+ System.out.println("b17969907Test unexpectedly didn't throw NPE.");
+ } catch (NullPointerException expected) {
+ System.out.println("b17969907TestWrapper caught NPE as expected.");
+ }
+ }
+
+ public static void b17969907Test() {
+ Integer i = new Integer(1);
+ int sum = 0;
+ while (sum < 100) {
+ sum += i;
+ i = null;
+ }
+ }
+
static double TooManyArgs(
long l00,
long l01,
diff --git a/test/800-smali/build b/test/800-smali/build
new file mode 100644
index 0000000000..1b5a4e3381
--- /dev/null
+++ b/test/800-smali/build
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# Compile Java classes
+mkdir classes
+${JAVAC} -d classes `find src -name '*.java'`
+${DX} -JXmx256m --debug --dex --output=java_classes.dex classes
+
+# Compile Smali classes
+${SMALI} -JXmx256m --output smali_classes.dex `find src -name '*.smali'`
+
+# Combine files.
+${DXMERGER} classes.dex java_classes.dex smali_classes.dex
+
+# Zip up output.
+zip $TEST_NAME.jar classes.dex
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
new file mode 100644
index 0000000000..468e7a6ee1
--- /dev/null
+++ b/test/800-smali/expected.txt
@@ -0,0 +1,2 @@
+b/17790197
+Done!
diff --git a/test/800-smali/info.txt b/test/800-smali/info.txt
new file mode 100644
index 0000000000..cfcc23095b
--- /dev/null
+++ b/test/800-smali/info.txt
@@ -0,0 +1,4 @@
+Smali-based tests.
+Will compile and run all the smali files in src/ and run the test cases mentioned in src/Main.java.
+
+Obviously needs to run under Dalvik or ART.
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
new file mode 100644
index 0000000000..0ef3a9d195
--- /dev/null
+++ b/test/800-smali/src/Main.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Smali exercise.
+ */
+public class Main {
+
+ private static class TestCase {
+ public TestCase(String testName, String testClass, String testMethodName, Object[] values,
+ Throwable expectedException, Object expectedReturn) {
+ this.testName = testName;
+ this.testClass = testClass;
+ this.testMethodName = testMethodName;
+ this.values = values;
+ this.expectedException = expectedException;
+ this.expectedReturn = expectedReturn;
+ }
+
+ String testName;
+ String testClass;
+ String testMethodName;
+ Object[] values;
+ Throwable expectedException;
+ Object expectedReturn;
+ }
+
+ private List<TestCase> testCases;
+
+ public Main() {
+ // Create the test cases.
+ testCases = new LinkedList<TestCase>();
+
+ testCases.add(new TestCase("b/17790197", "B17790197", "getInt", null, null, 100));
+ }
+
+ public void runTests() {
+ for (TestCase tc : testCases) {
+ System.out.println(tc.testName);
+ try {
+ runTest(tc);
+ } catch (Exception exc) {
+ exc.printStackTrace(System.out);
+ }
+ }
+ }
+
+ private void runTest(TestCase tc) throws Exception {
+ Class<?> c = Class.forName(tc.testClass);
+
+ Method[] methods = c.getDeclaredMethods();
+
+ // For simplicity we assume that test methods are not overloaded. So searching by name
+ // will give us the method we need to run.
+ Method method = null;
+ for (Method m : methods) {
+ if (m.getName().equals(tc.testMethodName)) {
+ method = m;
+ break;
+ }
+ }
+
+ if (method == null) {
+ throw new IllegalArgumentException("Could not find test method " + tc.testMethodName +
+ " in class " + tc.testClass + " for test " + tc.testName);
+ }
+
+ Exception errorReturn = null;
+ try {
+ Object retValue = method.invoke(null, tc.values);
+ if (tc.expectedException != null) {
+ errorReturn = new IllegalStateException("Expected an exception in test " +
+ tc.testName);
+ }
+ if (tc.expectedReturn == null && retValue != null) {
+ errorReturn = new IllegalStateException("Expected a null result in test " +
+ tc.testName);
+ } else if (tc.expectedReturn != null &&
+ (retValue == null || !tc.expectedReturn.equals(retValue))) {
+ errorReturn = new IllegalStateException("Expected return " + tc.expectedReturn +
+ ", but got " + retValue);
+ }
+ } catch (Exception exc) {
+ if (tc.expectedException == null) {
+ errorReturn = new IllegalStateException("Did not expect exception", exc);
+ } else if (!tc.expectedException.getClass().equals(exc.getClass())) {
+ errorReturn = new IllegalStateException("Expected " +
+ tc.expectedException.getClass().getName() +
+ ", but got " + exc.getClass(), exc);
+ }
+ } finally {
+ if (errorReturn != null) {
+ throw errorReturn;
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ Main main = new Main();
+
+ main.runTests();
+
+ System.out.println("Done!");
+ }
+}
diff --git a/test/800-smali/src/b_17790197.smali b/test/800-smali/src/b_17790197.smali
new file mode 100644
index 0000000000..7560fcf834
--- /dev/null
+++ b/test/800-smali/src/b_17790197.smali
@@ -0,0 +1,17 @@
+.class public LB17790197;
+
+.super Ljava/lang/Object;
+
+.method public static getInt()I
+ .registers 4
+ const/16 v0, 100
+ const/4 v1, 1
+ const/4 v2, 7
+ :loop
+ if-eq v2, v0, :done
+ add-int v2, v2, v1
+ goto :loop
+ :done
+ add-float v3, v0, v1
+ return v2
+.end method
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 5ca8bec9aa..9082b47811 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -36,9 +36,11 @@ TEST_ART_RUN_TEST_BUILD_RULES :=
# $(1): the test number
define define-build-art-run-test
dmart_target := $(art_run_tests_dir)/art-run-tests/$(1)/touch
-$$(dmart_target): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin
+$$(dmart_target): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/dexmerger
$(hide) rm -rf $$(dir $$@) && mkdir -p $$(dir $$@)
$(hide) DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
+ SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
+ DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
$(LOCAL_PATH)/run-test --build-only --output-path $$(abspath $$(dir $$@)) $(1)
$(hide) touch $$@
@@ -50,7 +52,7 @@ $(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-build-art-run-test,$
include $(CLEAR_VARS)
LOCAL_MODULE_TAGS := tests
LOCAL_MODULE := art-run-tests
-LOCAL_ADDITIONAL_DEPENDENCIES := $(TEST_ART_RUN_TEST_BUILD_RULES)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(TEST_ART_RUN_TEST_BUILD_RULES) smali dexmerger
# The build system use this flag to pick up files generated by declare-make-art-run-test.
LOCAL_PICKUP_FILES := $(art_run_tests_dir)
@@ -204,7 +206,8 @@ TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
# Tests that are broken with GC stress.
TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
- 004-SignalTest
+ 004-SignalTest \
+ 114-ParallelGC
ifneq (,$(filter gcstress,$(GC_TYPES)))
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
@@ -454,9 +457,11 @@ define define-test-art-run-test
$$(run_test_options)
$$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
.PHONY: $$(run_test_rule_name)
-$$(run_test_rule_name): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $$(prereq_rule)
+$$(run_test_rule_name): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $(HOST_OUT_EXECUTABLES)/smali $(HOST_OUT_EXECUTABLES)/dexmerger $$(prereq_rule)
$(hide) $$(call ART_TEST_SKIP,$$@) && \
DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
+ SMALI=$(abspath $(HOST_OUT_EXECUTABLES)/smali) \
+ DXMERGER=$(abspath $(HOST_OUT_EXECUTABLES)/dexmerger) \
art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(9) \
&& $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
$$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
diff --git a/test/run-test b/test/run-test
index 3b5df0de7c..3e98d6d09a 100755
--- a/test/run-test
+++ b/test/run-test
@@ -55,6 +55,16 @@ if [ -z "$JASMIN" ]; then
export JASMIN="jasmin"
fi
+# If smali was not set by the environment variable, assume it is in the path.
+if [ -z "$SMALI" ]; then
+ export SMALI="smali"
+fi
+
+# If dexmerger was not set by the environment variable, assume it is in the path.
+if [ -z "$DXMERGER" ]; then
+ export DXMERGER="dexmerger"
+fi
+
info="info.txt"
build="build"