Merge "Compiler cleanup: remove some old JIT leftovers." into dalvik-dev
diff --git a/src/asm_support.h b/src/asm_support.h
index 0a8489c..470b81f 100644
--- a/src/asm_support.h
+++ b/src/asm_support.h
@@ -17,22 +17,29 @@
#ifndef ART_SRC_ASM_SUPPORT_H_
#define ART_SRC_ASM_SUPPORT_H_
+// Value loaded into rSUSPEND for quick. When this value is counted down to zero we do a suspend
+// check.
#define SUSPEND_CHECK_INTERVAL (1000)
+// Offsets within java.lang.String.
#define STRING_VALUE_OFFSET 8
#define STRING_COUNT_OFFSET 12
#define STRING_OFFSET_OFFSET 20
#define STRING_DATA_OFFSET 12
#if defined(__arm__)
+// Register holding suspend check count down.
#define rSUSPEND r4
+// Register holding Thread::Current().
#define rSELF r9
// Offset of field Thread::suspend_count_ verified in InitCpu
#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::exception_ verified in InitCpu
#define THREAD_EXCEPTION_OFFSET 12
#elif defined(__mips__)
+// Register holding suspend check count down.
#define rSUSPEND $s0
+// Register holding Thread::Current().
#define rSELF $s1
// Offset of field Thread::suspend_count_ verified in InitCpu
#define THREAD_FLAGS_OFFSET 0
diff --git a/src/base/macros.h b/src/base/macros.h
index 48cb9c0..3a24c08 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -130,10 +130,10 @@
#define LIKELY(x) __builtin_expect((x), true)
#define UNLIKELY(x) __builtin_expect((x), false)
-#ifdef NDEBUG
+#ifndef NDEBUG
#define ALWAYS_INLINE
#else
-#define ALWAYS_INLINE __attribute__((always_inline))
+#define ALWAYS_INLINE __attribute__ ((always_inline))
#endif
// bionic and glibc both have TEMP_FAILURE_RETRY, but Mac OS' libc doesn't.
diff --git a/src/compiler/codegen/arm/int_arm.cc b/src/compiler/codegen/arm/int_arm.cc
index 857ec93..2736215 100644
--- a/src/compiler/codegen/arm/int_arm.cc
+++ b/src/compiler/codegen/arm/int_arm.cc
@@ -457,7 +457,7 @@
RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
RegLocation rl_new_value = LoadValue(cu, rl_src_new_value, kCoreReg);
- if (need_write_barrier) {
+ if (need_write_barrier && !IsConstantNullRef(cu, rl_new_value)) {
// Mark card for object assuming new value is stored.
MarkGCCard(cu, rl_new_value.low_reg, rl_object.low_reg);
}
@@ -940,7 +940,9 @@
StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
FreeTemp(cu, r_ptr);
FreeTemp(cu, r_index);
- MarkGCCard(cu, r_value, r_array);
+ if (!IsConstantNullRef(cu, rl_src)) {
+ MarkGCCard(cu, r_value, r_array);
+ }
}
void ArmCodegen::GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode,
diff --git a/src/compiler/codegen/gen_common.cc b/src/compiler/codegen/gen_common.cc
index 1ed6f9d..0a46593 100644
--- a/src/compiler/codegen/gen_common.cc
+++ b/src/compiler/codegen/gen_common.cc
@@ -425,7 +425,7 @@
if (is_volatile) {
GenMemBarrier(cu, kStoreLoad);
}
- if (is_object) {
+ if (is_object && !IsConstantNullRef(cu, rl_src)) {
MarkGCCard(cu, rl_src.low_reg, rBase);
}
FreeTemp(cu, rBase);
@@ -766,7 +766,7 @@
if (is_volatile) {
GenMemBarrier(cu, kLoadLoad);
}
- if (is_object) {
+ if (is_object && !IsConstantNullRef(cu, rl_src)) {
MarkGCCard(cu, rl_src.low_reg, rl_obj.low_reg);
}
}
diff --git a/src/compiler/codegen/mips/int_mips.cc b/src/compiler/codegen/mips/int_mips.cc
index 31c13f2..113183c 100644
--- a/src/compiler/codegen/mips/int_mips.cc
+++ b/src/compiler/codegen/mips/int_mips.cc
@@ -632,7 +632,9 @@
StoreBaseIndexed(cu, r_ptr, r_index, r_value, scale, kWord);
FreeTemp(cu, r_ptr);
FreeTemp(cu, r_index);
- MarkGCCard(cu, r_value, r_array);
+ if (!IsConstantNullRef(cu, rl_src)) {
+ MarkGCCard(cu, r_value, r_array);
+ }
}
void MipsCodegen::GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
diff --git a/src/compiler/codegen/mir_to_gbc.cc b/src/compiler/codegen/mir_to_gbc.cc
index 79ac242..a2c2bbc 100644
--- a/src/compiler/codegen/mir_to_gbc.cc
+++ b/src/compiler/codegen/mir_to_gbc.cc
@@ -1018,7 +1018,7 @@
}
EmitPopShadowFrame(cu);
cu->irb->CreateRet(GetLLVMValue(cu, rl_src[0].orig_sreg));
- bb->has_return = true;
+ DCHECK(bb->terminated_by_return);
}
break;
@@ -1028,7 +1028,7 @@
}
EmitPopShadowFrame(cu);
cu->irb->CreateRetVoid();
- bb->has_return = true;
+ DCHECK(bb->terminated_by_return);
}
break;
@@ -1916,7 +1916,7 @@
if (bb->block_type == kEntryBlock) {
cu->entryTarget_bb = GetLLVMBlock(cu, bb->fall_through->id);
- } else if ((bb->fall_through != NULL) && !bb->has_return) {
+ } else if ((bb->fall_through != NULL) && !bb->terminated_by_return) {
cu->irb->CreateBr(GetLLVMBlock(cu, bb->fall_through->id));
}
diff --git a/src/compiler/codegen/x86/int_x86.cc b/src/compiler/codegen/x86/int_x86.cc
index 4dff95a..b2292fb 100644
--- a/src/compiler/codegen/x86/int_x86.cc
+++ b/src/compiler/codegen/x86/int_x86.cc
@@ -580,7 +580,9 @@
StoreBaseIndexedDisp(cu, r_array, r_index, scale,
data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
FreeTemp(cu, r_index);
- MarkGCCard(cu, r_value, r_array);
+ if (!IsConstantNullRef(cu, rl_src)) {
+ MarkGCCard(cu, r_value, r_array);
+ }
}
void X86Codegen::GenShiftImmOpLong(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
diff --git a/src/compiler/compiler_ir.h b/src/compiler/compiler_ir.h
index 056c308..a3a4ff9 100644
--- a/src/compiler/compiler_ir.h
+++ b/src/compiler/compiler_ir.h
@@ -230,8 +230,8 @@
bool catch_entry;
bool explicit_throw;
bool conditional_branch;
- bool has_return; // Contains a return.
- bool dominates_return; // Is a member of return extended basic block
+ bool terminated_by_return; // Block ends with a Dalvik return opcode.
+ bool dominates_return; // Is a member of return extended basic block.
uint16_t start_offset;
uint16_t nesting_depth;
BBType block_type;
@@ -605,6 +605,11 @@
Low32Bits(static_cast<int64_t>(cu->constant_values[loc.orig_sreg]));
}
+static inline bool IsConstantNullRef(const CompilationUnit* cu, RegLocation loc)
+{
+ return loc.ref && loc.is_const && (ConstantValue(cu, loc) == 0);
+}
+
static inline bool MustFlushConstant(const CompilationUnit* cu, RegLocation loc)
{
DCHECK(IsConst(cu, loc));
diff --git a/src/compiler/dataflow.cc b/src/compiler/dataflow.cc
index 1e20cbd..4d7e9d7 100644
--- a/src/compiler/dataflow.cc
+++ b/src/compiler/dataflow.cc
@@ -1890,6 +1890,11 @@
bb->taken = bb_next->taken;
// Include the rest of the instructions
bb->last_mir_insn = bb_next->last_mir_insn;
+ /*
+ * If the lower half of the pair of blocks being combined contained a return, move
+ * the flag to the newly combined block.
+ */
+ bb->terminated_by_return = bb_next->terminated_by_return;
/*
* NOTE: we aren't updating all dataflow info here. Should either make sure this pass
@@ -2096,17 +2101,17 @@
}
BasicBlock* start_bb = bb;
cu->extended_basic_blocks.push_back(bb);
- bool has_return = false;
+ bool terminated_by_return = false;
// Visit blocks strictly dominated by this head.
while (bb != NULL) {
bb->visited = true;
- has_return |= bb->has_return;
+ terminated_by_return |= bb->terminated_by_return;
bb = NextDominatedBlock(cu, bb);
if (cu->verbose && (bb != NULL)) {
LOG(INFO) << "...added bb " << bb->id;
}
}
- if (has_return) {
+ if (terminated_by_return) {
// This extended basic block contains a return, so mark all members.
bb = start_bb;
while (bb != NULL) {
diff --git a/src/compiler/frontend.cc b/src/compiler/frontend.cc
index 44baea2..9afd18e 100644
--- a/src/compiler/frontend.cc
+++ b/src/compiler/frontend.cc
@@ -148,6 +148,10 @@
bottom_block->first_mir_insn = insn;
bottom_block->last_mir_insn = orig_block->last_mir_insn;
+ /* If this block was terminated by a return, the flag needs to go with the bottom block */
+ bottom_block->terminated_by_return = orig_block->terminated_by_return;
+ orig_block->terminated_by_return = false;
+
/* Add it to the quick lookup cache */
cu->block_map.Put(bottom_block->start_offset, bottom_block);
@@ -972,6 +976,7 @@
cur_block = ProcessCanBranch(cu.get(), cur_block, insn, cur_offset,
width, flags, code_ptr, code_end);
} else if (flags & Instruction::kReturn) {
+ cur_block->terminated_by_return = true;
cur_block->fall_through = exit_block;
InsertGrowableList(cu.get(), exit_block->predecessors,
reinterpret_cast<uintptr_t>(cur_block));