 compiler/Android.mk                                            |   4
 compiler/dex/mir_graph.cc                                      |  23
 compiler/dex/mir_graph.h                                       |  41
 compiler/dex/mir_optimization.cc                               | 154
 compiler/dex/quick/arm/call_arm.cc                             |   8
 compiler/dex/quick/arm/codegen_arm.h                           |   4
 compiler/dex/quick/arm64/call_arm64.cc                         |   8
 compiler/dex/quick/arm64/codegen_arm64.h                       |   5
 compiler/dex/quick/codegen_util.cc                             |   9
 compiler/dex/quick/gen_common.cc                               |  19
 compiler/dex/quick/mips/call_mips.cc                           |   8
 compiler/dex/quick/mips/codegen_mips.h                         |   4
 compiler/dex/quick/mir_to_lir.h                                |  14
 compiler/dex/quick/x86/call_x86.cc                             |   9
 compiler/dex/quick/x86/codegen_x86.h                           |   5
 compiler/dex/quick/x86/target_x86.cc (mode -rwxr-xr-x)         |   3
 compiler/dex/ssa_transformation.cc                             |  32
 compiler/utils/arm/assembler_arm32_test.cc                     |   3
 oatdump/oatdump.cc                                             |  35
 runtime/Android.mk                                             |   4
 runtime/check_reference_map_visitor.h                          |   2
 runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc |   2
 runtime/fault_handler.cc                                       |   3
 runtime/instrumentation.cc                                     |   4
 runtime/instrumentation.h                                      |   2
 runtime/mirror/art_method-inl.h                                |  31
 runtime/mirror/art_method.cc                                   |  20
 runtime/mirror/art_method.h                                    |  29
 runtime/stack.cc                                               |  18
 runtime/thread.cc                                              |   8
 30 files changed, 288 insertions(+), 223 deletions(-)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 3e0378ce2f..eb9ad475b4 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -207,7 +207,9 @@ define build-libart-compiler
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart-compiler
LOCAL_SHARED_LIBRARIES += libart
- LOCAL_FDO_SUPPORT := true
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_FDO_SUPPORT := true
+ endif
else # debug
LOCAL_MODULE := libartd-compiler
LOCAL_SHARED_LIBRARIES += libartd
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index b87ab66347..29972ddba5 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -97,11 +97,6 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
max_nested_loops_(0u),
i_dom_list_(NULL),
temp_scoped_alloc_(),
- temp_insn_data_(nullptr),
- temp_bit_vector_size_(0u),
- temp_bit_vector_(nullptr),
- temp_bit_matrix_(nullptr),
- temp_gvn_(),
block_list_(arena->Adapter(kArenaAllocBBList)),
try_block_addr_(NULL),
entry_block_(NULL),
@@ -133,6 +128,7 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
sfield_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
method_lowering_infos_(arena->Adapter(kArenaAllocLoweringInfo)),
gen_suspend_test_list_(arena->Adapter()) {
+ memset(&temp_, 0, sizeof(temp_));
use_counts_.reserve(256);
raw_use_counts_.reserve(256);
block_list_.reserve(100);
@@ -1348,9 +1344,10 @@ void MIRGraph::DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir
decoded_mir->append(", ");
decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
}
- decoded_mir->append(StringPrintf(" = vect%d", mir->dalvikInsn.vB));
+ decoded_mir->append(StringPrintf(" = vect%d (extr_idx:%d)", mir->dalvikInsn.vB, mir->dalvikInsn.arg[0]));
} else {
- decoded_mir->append(StringPrintf(" v%d = vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+ decoded_mir->append(StringPrintf(" v%d = vect%d (extr_idx:%d)", mir->dalvikInsn.vA,
+ mir->dalvikInsn.vB, mir->dalvikInsn.arg[0]));
}
FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
break;
@@ -1681,9 +1678,9 @@ void MIRGraph::InitializeMethodUses() {
void MIRGraph::SSATransformationStart() {
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = GetNumOfCodeAndTempVRs();
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapRegisterV);
+ temp_.ssa.num_vregs = GetNumOfCodeAndTempVRs();
+ temp_.ssa.work_live_vregs = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.ssa.num_vregs, false, kBitMapRegisterV);
}
void MIRGraph::SSATransformationEnd() {
@@ -1692,9 +1689,9 @@ void MIRGraph::SSATransformationEnd() {
VerifyDataflow();
}
- temp_bit_vector_size_ = 0u;
- temp_bit_vector_ = nullptr;
- temp_bit_matrix_ = nullptr; // Def block matrix.
+ temp_.ssa.num_vregs = 0u;
+ temp_.ssa.work_live_vregs = nullptr;
+ temp_.ssa.def_block_matrix = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index a1d24e2c37..d77ad6f76a 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -1270,15 +1270,38 @@ class MIRGraph {
size_t max_nested_loops_;
int* i_dom_list_;
std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
- uint16_t* temp_insn_data_;
- uint32_t temp_bit_vector_size_;
- ArenaBitVector* temp_bit_vector_;
- // temp_bit_matrix_ used as one of
- // - def_block_matrix: original num registers x num_blocks_,
- // - ending_null_check_matrix: num_blocks_ x original num registers,
- // - ending_clinit_check_matrix: num_blocks_ x unique class count.
- ArenaBitVector** temp_bit_matrix_;
- std::unique_ptr<GlobalValueNumbering> temp_gvn_;
+ // Union of temporaries used by different passes.
+ union {
+ // Class init check elimination.
+ struct {
+ size_t num_class_bits; // 2 bits per class: class initialized and class in dex cache.
+ ArenaBitVector* work_classes_to_check;
+ ArenaBitVector** ending_classes_to_check_matrix; // num_blocks_ x num_class_bits.
+ uint16_t* indexes;
+ } cice;
+ // Null check elimination.
+ struct {
+ size_t num_vregs;
+ ArenaBitVector* work_vregs_to_check;
+ ArenaBitVector** ending_vregs_to_check_matrix; // num_blocks_ x num_vregs.
+ } nce;
+ // Special method inlining.
+ struct {
+ size_t num_indexes;
+ ArenaBitVector* processed_indexes;
+ uint16_t* lowering_infos;
+ } smi;
+ // SSA transformation.
+ struct {
+ size_t num_vregs;
+ ArenaBitVector* work_live_vregs;
+ ArenaBitVector** def_block_matrix; // num_vregs x num_blocks_.
+ } ssa;
+ // Global value numbering.
+ struct {
+ GlobalValueNumbering* gvn;
+ } gvn;
+ } temp_;
static const int kInvalidEntry = -1;
ArenaVector<BasicBlock*> block_list_;
ArenaBitVector* try_block_addr_;
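Note on the temp_ union above: the per-pass temporaries previously sat side by side (temp_insn_data_, temp_bit_vector_, temp_bit_matrix_, temp_gvn_), but the passes run strictly one at a time, so the union lets them share storage while giving each pass descriptive member names. The constructor change in mir_graph.cc (memset(&temp_, 0, sizeof(temp_))) zeroes the whole union once. It also explains a later hunk: temp_gvn_ changes from a std::unique_ptr to a plain pointer with an explicit delete in ApplyGlobalValueNumberingEnd(), because a union member must be trivially constructible and destructible. A minimal sketch of the pattern, with illustrative names rather than ART's types:

    // Sketch: one storage block shared by mutually exclusive per-pass
    // temporaries. Names are illustrative, not ART's exact types.
    #include <cstddef>
    #include <cstring>

    class BitVector;       // stand-in for ArenaBitVector
    class ValueNumbering;  // stand-in for GlobalValueNumbering

    class Graph {
     public:
      Graph() { std::memset(&temp_, 0, sizeof(temp_)); }
     private:
      union {
        struct {                      // SSA transformation
          size_t num_vregs;
          BitVector* work_live_vregs;
          BitVector** def_block_matrix;
        } ssa;
        struct {                      // null check elimination
          size_t num_vregs;
          BitVector* work_vregs_to_check;
          BitVector** ending_vregs_to_check_matrix;
        } nce;
        struct {                      // global value numbering: a raw pointer,
          ValueNumbering* gvn;        // since a union rejects members with
        } gvn;                        // non-trivial destructors
      } temp_;
    };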
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index a0ad2133be..d025d08168 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -891,12 +891,12 @@ bool MIRGraph::EliminateNullChecksGate() {
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = GetNumOfCodeVRs();
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
- temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ temp_.nce.num_vregs = GetNumOfCodeVRs();
+ temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
+ temp_.nce.ending_vregs_to_check_matrix = static_cast<ArenaBitVector**>(
temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
- std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
+ std::fill_n(temp_.nce.ending_vregs_to_check_matrix, GetNumBlocks(), nullptr);
// reset MIR_MARK
AllNodesIterator iter(this);
@@ -919,7 +919,7 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
return false;
}
- ArenaBitVector* vregs_to_check = temp_bit_vector_;
+ ArenaBitVector* vregs_to_check = temp_.nce.work_vregs_to_check;
/*
* Set initial state. Catch blocks don't need any special treatment.
*/
@@ -940,7 +940,7 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
// Starting state is union of all incoming arcs.
bool copied_first = false;
for (BasicBlockId pred_id : bb->predecessors) {
- if (temp_bit_matrix_[pred_id] == nullptr) {
+ if (temp_.nce.ending_vregs_to_check_matrix[pred_id] == nullptr) {
continue;
}
BasicBlock* pred_bb = GetBasicBlock(pred_id);
@@ -962,9 +962,9 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
}
if (!copied_first) {
copied_first = true;
- vregs_to_check->Copy(temp_bit_matrix_[pred_id]);
+ vregs_to_check->Copy(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
} else {
- vregs_to_check->Union(temp_bit_matrix_[pred_id]);
+ vregs_to_check->Union(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
}
if (null_check_insn != nullptr) {
vregs_to_check->ClearBit(null_check_insn->dalvikInsn.vA);
@@ -1057,27 +1057,27 @@ bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
// Did anything change?
bool nce_changed = false;
- ArenaBitVector* old_ending_ssa_regs_to_check = temp_bit_matrix_[bb->id];
+ ArenaBitVector* old_ending_ssa_regs_to_check = temp_.nce.ending_vregs_to_check_matrix[bb->id];
if (old_ending_ssa_regs_to_check == nullptr) {
DCHECK(temp_scoped_alloc_.get() != nullptr);
nce_changed = vregs_to_check->GetHighestBitSet() != -1;
- temp_bit_matrix_[bb->id] = vregs_to_check;
+ temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
// Create a new vregs_to_check for next BB.
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
+ temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
} else if (!vregs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
nce_changed = true;
- temp_bit_matrix_[bb->id] = vregs_to_check;
- temp_bit_vector_ = old_ending_ssa_regs_to_check; // Reuse for vregs_to_check for next BB.
+ temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
+ temp_.nce.work_vregs_to_check = old_ending_ssa_regs_to_check; // Reuse for next BB.
}
return nce_changed;
}
void MIRGraph::EliminateNullChecksEnd() {
// Clean up temporaries.
- temp_bit_vector_size_ = 0u;
- temp_bit_vector_ = nullptr;
- temp_bit_matrix_ = nullptr;
+ temp_.nce.num_vregs = 0u;
+ temp_.nce.work_vregs_to_check = nullptr;
+ temp_.nce.ending_vregs_to_check_matrix = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
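The EliminateNullChecksGate/EliminateNullChecks/EliminateNullChecksEnd hunks above form a forward dataflow pass: each block's starting vregs_to_check is the union of its predecessors' ending sets (kept in ending_vregs_to_check_matrix), and blocks are revisited until no ending set changes. A condensed stand-alone sketch of that fixpoint shape, using std::set in place of ArenaBitVector; simplified, since the real transfer function also clears bits as checks are proven and ART drives the iteration with a repeating iterator rather than an explicit worklist:

    #include <deque>
    #include <set>
    #include <utility>
    #include <vector>

    struct Block {
      std::vector<Block*> preds;
      std::vector<Block*> succs;
      std::set<int> gen;     // vregs this block leaves unchecked (simplified)
      std::set<int> ending;  // fixpoint output: vregs still to check at exit
    };

    // Transfer: start from the union of predecessor ending sets, add gen,
    // and report whether the block's ending set changed.
    bool Transfer(Block* bb) {
      std::set<int> in;
      for (Block* p : bb->preds) in.insert(p->ending.begin(), p->ending.end());
      in.insert(bb->gen.begin(), bb->gen.end());
      if (in == bb->ending) return false;
      bb->ending = std::move(in);
      return true;
    }

    void RunToFixpoint(const std::vector<Block*>& blocks) {
      std::deque<Block*> work(blocks.begin(), blocks.end());
      while (!work.empty()) {
        Block* bb = work.front();
        work.pop_front();
        if (Transfer(bb)) {
          for (Block* s : bb->succs) work.push_back(s);  // input changed
        }
      }
    }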
@@ -1124,9 +1124,9 @@ bool MIRGraph::EliminateClassInitChecksGate() {
// Each insn we use here has at least 2 code units, offset/2 will be a unique index.
const size_t end = (GetNumDalvikInsns() + 1u) / 2u;
- temp_insn_data_ = static_cast<uint16_t*>(
- temp_scoped_alloc_->Alloc(end * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
- std::fill_n(temp_insn_data_, end, 0xffffu);
+ temp_.cice.indexes = static_cast<uint16_t*>(
+ temp_scoped_alloc_->Alloc(end * sizeof(*temp_.cice.indexes), kArenaAllocGrowableArray));
+ std::fill_n(temp_.cice.indexes, end, 0xffffu);
uint32_t unique_class_count = 0u;
{
@@ -1173,8 +1173,8 @@ bool MIRGraph::EliminateClassInitChecksGate() {
static_cast<uint16_t>(class_to_index_map.size())
};
uint16_t index = class_to_index_map.insert(entry).first->index;
- // Using offset/2 for index into temp_insn_data_.
- temp_insn_data_[mir->offset / 2u] = index;
+ // Using offset/2 for index into temp_.cice.indexes.
+ temp_.cice.indexes[mir->offset / 2u] = index;
}
} else if (mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
mir->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE) {
@@ -1187,8 +1187,8 @@ bool MIRGraph::EliminateClassInitChecksGate() {
static_cast<uint16_t>(class_to_index_map.size())
};
uint16_t index = class_to_index_map.insert(entry).first->index;
- // Using offset/2 for index into temp_insn_data_.
- temp_insn_data_[mir->offset / 2u] = index;
+ // Using offset/2 for index into temp_.cice.indexes.
+ temp_.cice.indexes[mir->offset / 2u] = index;
}
}
}
@@ -1199,19 +1199,19 @@ bool MIRGraph::EliminateClassInitChecksGate() {
if (unique_class_count == 0u) {
// All SGET/SPUTs refer to initialized classes. Nothing to do.
- temp_insn_data_ = nullptr;
+ temp_.cice.indexes = nullptr;
temp_scoped_alloc_.reset();
return false;
}
// 2 bits for each class: is class initialized, is class in dex cache.
- temp_bit_vector_size_ = 2u * unique_class_count;
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
- temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ temp_.cice.num_class_bits = 2u * unique_class_count;
+ temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
+ temp_.cice.ending_classes_to_check_matrix = static_cast<ArenaBitVector**>(
temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
- std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
- DCHECK_GT(temp_bit_vector_size_, 0u);
+ std::fill_n(temp_.cice.ending_classes_to_check_matrix, GetNumBlocks(), nullptr);
+ DCHECK_GT(temp_.cice.num_class_bits, 0u);
return true;
}
@@ -1229,22 +1229,22 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
/*
* Set initial state. Catch blocks don't need any special treatment.
*/
- ArenaBitVector* classes_to_check = temp_bit_vector_;
+ ArenaBitVector* classes_to_check = temp_.cice.work_classes_to_check;
DCHECK(classes_to_check != nullptr);
if (bb->block_type == kEntryBlock) {
- classes_to_check->SetInitialBits(temp_bit_vector_size_);
+ classes_to_check->SetInitialBits(temp_.cice.num_class_bits);
} else {
// Starting state is union of all incoming arcs.
bool copied_first = false;
for (BasicBlockId pred_id : bb->predecessors) {
- if (temp_bit_matrix_[pred_id] == nullptr) {
+ if (temp_.cice.ending_classes_to_check_matrix[pred_id] == nullptr) {
continue;
}
if (!copied_first) {
copied_first = true;
- classes_to_check->Copy(temp_bit_matrix_[pred_id]);
+ classes_to_check->Copy(temp_.cice.ending_classes_to_check_matrix[pred_id]);
} else {
- classes_to_check->Union(temp_bit_matrix_[pred_id]);
+ classes_to_check->Union(temp_.cice.ending_classes_to_check_matrix[pred_id]);
}
}
DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
@@ -1253,7 +1253,7 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
// Walk through the instruction in the block, updating as necessary
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
- uint16_t index = temp_insn_data_[mir->offset / 2u];
+ uint16_t index = temp_.cice.indexes[mir->offset / 2u];
if (index != 0xffffu) {
bool check_initialization = false;
bool check_dex_cache = false;
@@ -1299,29 +1299,29 @@ bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
// Did anything change?
bool changed = false;
- ArenaBitVector* old_ending_classes_to_check = temp_bit_matrix_[bb->id];
+ ArenaBitVector* old_ending_classes_to_check = temp_.cice.ending_classes_to_check_matrix[bb->id];
if (old_ending_classes_to_check == nullptr) {
DCHECK(temp_scoped_alloc_.get() != nullptr);
changed = classes_to_check->GetHighestBitSet() != -1;
- temp_bit_matrix_[bb->id] = classes_to_check;
+ temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
// Create a new classes_to_check for next BB.
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
+ temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
} else if (!classes_to_check->Equal(old_ending_classes_to_check)) {
changed = true;
- temp_bit_matrix_[bb->id] = classes_to_check;
- temp_bit_vector_ = old_ending_classes_to_check; // Reuse for classes_to_check for next BB.
+ temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
+ temp_.cice.work_classes_to_check = old_ending_classes_to_check; // Reuse for next BB.
}
return changed;
}
void MIRGraph::EliminateClassInitChecksEnd() {
// Clean up temporaries.
- temp_bit_vector_size_ = 0u;
- temp_bit_vector_ = nullptr;
- temp_bit_matrix_ = nullptr;
- DCHECK(temp_insn_data_ != nullptr);
- temp_insn_data_ = nullptr;
+ temp_.cice.num_class_bits = 0u;
+ temp_.cice.work_classes_to_check = nullptr;
+ temp_.cice.ending_classes_to_check_matrix = nullptr;
+ DCHECK(temp_.cice.indexes != nullptr);
+ temp_.cice.indexes = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
}
@@ -1333,39 +1333,39 @@ bool MIRGraph::ApplyGlobalValueNumberingGate() {
DCHECK(temp_scoped_alloc_ == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- DCHECK(temp_gvn_ == nullptr);
- temp_gvn_.reset(
- new (temp_scoped_alloc_.get()) GlobalValueNumbering(cu_, temp_scoped_alloc_.get(),
- GlobalValueNumbering::kModeGvn));
+ DCHECK(temp_.gvn.gvn == nullptr);
+ temp_.gvn.gvn = new (temp_scoped_alloc_.get()) GlobalValueNumbering(
+ cu_, temp_scoped_alloc_.get(), GlobalValueNumbering::kModeGvn);
return true;
}
bool MIRGraph::ApplyGlobalValueNumbering(BasicBlock* bb) {
- DCHECK(temp_gvn_ != nullptr);
- LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb);
+ DCHECK(temp_.gvn.gvn != nullptr);
+ LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb);
if (lvn != nullptr) {
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
lvn->GetValueNumber(mir);
}
}
- bool change = (lvn != nullptr) && temp_gvn_->FinishBasicBlock(bb);
+ bool change = (lvn != nullptr) && temp_.gvn.gvn->FinishBasicBlock(bb);
return change;
}
void MIRGraph::ApplyGlobalValueNumberingEnd() {
// Perform modifications.
- if (temp_gvn_->Good()) {
+ DCHECK(temp_.gvn.gvn != nullptr);
+ if (temp_.gvn.gvn->Good()) {
if (max_nested_loops_ != 0u) {
- temp_gvn_->StartPostProcessing();
+ temp_.gvn.gvn->StartPostProcessing();
TopologicalSortIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
ScopedArenaAllocator allocator(&cu_->arena_stack); // Reclaim memory after each LVN.
- LocalValueNumbering* lvn = temp_gvn_->PrepareBasicBlock(bb, &allocator);
+ LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb, &allocator);
if (lvn != nullptr) {
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
lvn->GetValueNumber(mir);
}
- bool change = temp_gvn_->FinishBasicBlock(bb);
+ bool change = temp_.gvn.gvn->FinishBasicBlock(bb);
DCHECK(!change) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
}
}
@@ -1376,16 +1376,16 @@ void MIRGraph::ApplyGlobalValueNumberingEnd() {
LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
}
- DCHECK(temp_gvn_ != nullptr);
- temp_gvn_.reset();
+ delete temp_.gvn.gvn;
+ temp_.gvn.gvn = nullptr;
DCHECK(temp_scoped_alloc_ != nullptr);
temp_scoped_alloc_.reset();
}
void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput) {
uint32_t method_index = invoke->meta.method_lowering_info;
- if (temp_bit_vector_->IsBitSet(method_index)) {
- iget_or_iput->meta.ifield_lowering_info = temp_insn_data_[method_index];
+ if (temp_.smi.processed_indexes->IsBitSet(method_index)) {
+ iget_or_iput->meta.ifield_lowering_info = temp_.smi.lowering_infos[method_index];
DCHECK_EQ(field_idx, GetIFieldLoweringInfo(iget_or_iput).FieldIndex());
return;
}
@@ -1402,8 +1402,8 @@ void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke,
uint32_t field_info_index = ifield_lowering_infos_.size();
ifield_lowering_infos_.push_back(inlined_field_info);
- temp_bit_vector_->SetBit(method_index);
- temp_insn_data_[method_index] = field_info_index;
+ temp_.smi.processed_indexes->SetBit(method_index);
+ temp_.smi.lowering_infos[method_index] = field_info_index;
iget_or_iput->meta.ifield_lowering_info = field_info_index;
}
@@ -1425,12 +1425,12 @@ void MIRGraph::InlineSpecialMethodsStart() {
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = method_lowering_infos_.size();
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapMisc);
- temp_bit_vector_->ClearAllBits();
- temp_insn_data_ = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
- temp_bit_vector_size_ * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
+ temp_.smi.num_indexes = method_lowering_infos_.size();
+ temp_.smi.processed_indexes = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_.smi.num_indexes, false, kBitMapMisc);
+ temp_.smi.processed_indexes->ClearAllBits();
+ temp_.smi.lowering_infos = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
+ temp_.smi.num_indexes * sizeof(*temp_.smi.lowering_infos), kArenaAllocGrowableArray));
}
void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
@@ -1477,10 +1477,12 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
}
void MIRGraph::InlineSpecialMethodsEnd() {
- DCHECK(temp_insn_data_ != nullptr);
- temp_insn_data_ = nullptr;
- DCHECK(temp_bit_vector_ != nullptr);
- temp_bit_vector_ = nullptr;
+ // Clean up temporaries.
+ DCHECK(temp_.smi.lowering_infos != nullptr);
+ temp_.smi.lowering_infos = nullptr;
+ temp_.smi.num_indexes = 0u;
+ DCHECK(temp_.smi.processed_indexes != nullptr);
+ temp_.smi.processed_indexes = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
}
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index a3b4df3c7e..f15d707a5c 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -288,18 +288,12 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
StoreValue(rl_dest, rl_result);
}
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-void ArmMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+void ArmMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
RegStorage reg_card_base = AllocTemp();
RegStorage reg_card_no = AllocTemp();
- LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
LoadWordDisp(rs_rARM_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index d2351997b7..e8d0c32ffd 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -106,7 +106,9 @@ class ArmMir2Lir FINAL : public Mir2Lir {
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
// Required for target - register utilities.
RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 3e5b7bfe0a..089e4b6709 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -251,20 +251,14 @@ void Arm64Mir2Lir::GenMoveException(RegLocation rl_dest) {
StoreValue(rl_dest, rl_result);
}
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-void Arm64Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+void Arm64Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
RegStorage reg_card_base = AllocTempWide();
RegStorage reg_card_no = AllocTempWide(); // Needs to be wide as addr is ref=64b
- LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
LoadWordDisp(rs_xSELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
// TODO(Arm64): generate "strb wB, [xB, wC, uxtw]" rather than "strb wB, [xB, xC]"?
StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base),
0, kUnsignedByte);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 5182a89474..5e10f80fa5 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -94,7 +94,10 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) OVERRIDE;
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
+
LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target, LIR** compare) OVERRIDE;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 9403516641..80cb535307 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -314,6 +314,15 @@ void Mir2Lir::UpdateLIROffsets() {
}
}
+void Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+ DCHECK(val_reg.Valid());
+ DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
+ LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, nullptr);
+ UnconditionallyMarkGCCard(tgt_addr_reg);
+ LIR* target = NewLIR0(kPseudoTargetLabel);
+ branch_over->target = target;
+}
+
/* Dump instructions and constant pool contents */
void Mir2Lir::CodegenDump() {
LOG(INFO) << "Dumping LIR insns for "
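The refactoring above splits card marking in two: the shared Mir2Lir::MarkGCCard() emits the value-null test and branch, and each back end now implements only UnconditionallyMarkGCCard(), the instructions that actually dirty the card (load the card table base from the Thread, shift the address right by kCardShift, store one byte). A plain C++ sketch of what the emitted code computes, assuming ART's 128-byte cards (kCardShift == 7) and its trick of using the low byte of the biased table base as the dirty marker:

    #include <cstdint>

    constexpr unsigned kCardShift = 7;  // 128-byte cards (assumption noted above)

    // What MarkGCCard + UnconditionallyMarkGCCard compute, in one function.
    void MarkGCCard(uint8_t* biased_card_base, const void* tgt_addr,
                    const void* stored_value) {
      if (stored_value == nullptr) {
        return;  // the OpCmpImmBranch(kCondEq, val_reg, 0, ...) fast path
      }
      uintptr_t addr = reinterpret_cast<uintptr_t>(tgt_addr);
      // StoreBaseIndexed(card_base, addr >> shift, card_base, 0, kUnsignedByte):
      // the marker stored is the low byte of the biased base itself.
      biased_card_base[addr >> kCardShift] =
          static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_card_base));
    }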
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 98ddc36f63..c00f90b6e2 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -416,8 +416,8 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
// share array alignment with ints (see comment at head of function)
size_t component_size = sizeof(int32_t);
- // Having a range of 0 is legal
- if (info->is_range && (elems > 0)) {
+ if (elems > 5) {
+ DCHECK(info->is_range); // Non-range insn can't encode more than 5 elems.
/*
* Bit of ugliness here. We're going generate a mem copy loop
* on the register range, but it is possible that some regs
@@ -487,7 +487,11 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
OpRegRegImm(kOpAdd, ref_reg, r_dst,
-mirror::Array::DataOffset(component_size).Int32Value());
}
- } else if (!info->is_range) {
+ FreeTemp(r_idx);
+ FreeTemp(r_dst);
+ FreeTemp(r_src);
+ } else {
+ DCHECK_LE(elems, 5); // Usually but not necessarily non-range.
// TUNING: interleave
for (int i = 0; i < elems; i++) {
RegLocation rl_arg;
@@ -507,6 +511,15 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
}
}
}
+ if (elems != 0 && info->args[0].ref) {
+ // If there is at least one potentially non-null value, unconditionally mark the GC card.
+ for (int i = 0; i < elems; i++) {
+ if (!mir_graph_->IsConstantNullRef(info->args[i])) {
+ UnconditionallyMarkGCCard(ref_reg);
+ break;
+ }
+ }
+ }
if (info->result.location != kLocInvalid) {
StoreValue(info->result, GetReturn(kRefReg));
}
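The block added above fixes a missed write barrier: filled-new-array stores references into the fresh array without per-element card marks, so the array's card must be dirtied once afterwards, and since the individual element values may no longer be in registers at that point, the unconditional variant is used. The mark is elided only when every argument is a compile-time null reference. A stand-alone sketch of that elision test, where Arg and known_null are stand-ins for RegLocation and MIRGraph::IsConstantNullRef():

    #include <vector>

    struct Arg { bool ref; bool known_null; };

    // Mark the card once if any element might be a non-null reference.
    bool NeedsCardMark(const std::vector<Arg>& args) {
      if (args.empty() || !args[0].ref) return false;  // not a reference array
      for (const Arg& a : args) {
        if (!a.known_null) return true;  // one maybe-non-null ref is enough
      }
      return false;  // every element is a compile-time null constant
    }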
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index ed73ef0a00..3bb81bf28e 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -222,19 +222,13 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
StoreValue(rl_dest, rl_result);
}
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-void MipsMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+void MipsMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
RegStorage reg_card_base = AllocTemp();
RegStorage reg_card_no = AllocTemp();
- LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
// NOTE: native pointer.
LoadWordDisp(rs_rMIPS_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 7e9d80df65..e08846c325 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -49,7 +49,9 @@ class MipsMir2Lir FINAL : public Mir2Lir {
OpSize size) OVERRIDE;
LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
// Required for target - register utilities.
RegStorage Solo64ToPair64(RegStorage reg);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 13ebc1e5d6..886b238ee3 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1071,6 +1071,13 @@ class Mir2Lir : public Backend {
// Update LIR for verbose listings.
void UpdateLIROffsets();
+ /**
+ * @brief Mark a garbage collection card. Skip if the stored value is null.
+ * @param val_reg the register holding the stored value to check against null.
+ * @param tgt_addr_reg the address of the object or array where the value was stored.
+ */
+ void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
+
/*
* @brief Load the address of the dex method into the register.
* @param target_method The MethodReference of the method to be invoked.
@@ -1139,7 +1146,12 @@ class Mir2Lir : public Backend {
OpSize size, VolatileKind is_volatile) = 0;
virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) = 0;
- virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;
+
+ /**
+ * @brief Unconditionally mark a garbage collection card.
+ * @param tgt_addr_reg the address of the object or array where the value was stored.
+ */
+ virtual void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) = 0;
// Required for target - register utilities.
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 61dcc28afc..a808459715 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -136,23 +136,16 @@ void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
StoreValue(rl_dest, rl_result);
}
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
+void X86Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
- DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
RegStorage reg_card_base = AllocTempRef();
RegStorage reg_card_no = AllocTempRef();
- LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
int ct_offset = cu_->target64 ?
Thread::CardTableOffset<8>().Int32Value() :
Thread::CardTableOffset<4>().Int32Value();
NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index d57dffb01d..26641f8d59 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -94,7 +94,10 @@ class X86Mir2Lir : public Mir2Lir {
OpSize size, VolatileKind is_volatile) OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) OVERRIDE;
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
+
void GenImplicitNullCheck(RegStorage reg, int opt_flags) OVERRIDE;
// Required for target - register utilities.
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index ead31b37b6..998aeff368 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -2263,7 +2263,8 @@ void X86Mir2Lir::GenReduceVector(MIR* mir) {
StoreFinalValue(rl_dest, rl_result);
} else {
int displacement = SRegOffset(rl_result.s_reg_low);
- LIR *l = NewLIR3(extr_opcode, rs_rX86_SP_32.GetReg(), displacement, vector_src.GetReg());
+ LIR *l = NewLIR4(extr_opcode, rs_rX86_SP_32.GetReg(), displacement, vector_src.GetReg(),
+ extract_index);
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
}
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index d3d76badd0..ed3388285f 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -126,7 +126,7 @@ bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
for (uint32_t idx : bb->data_flow_info->def_v->Indexes()) {
/* Block bb defines register idx */
- temp_bit_matrix_[idx]->SetBit(bb->id);
+ temp_.ssa.def_block_matrix[idx]->SetBit(bb->id);
}
return true;
}
@@ -135,16 +135,16 @@ void MIRGraph::ComputeDefBlockMatrix() {
int num_registers = GetNumOfCodeAndTempVRs();
/* Allocate num_registers bit vector pointers */
DCHECK(temp_scoped_alloc_ != nullptr);
- DCHECK(temp_bit_matrix_ == nullptr);
- temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ DCHECK(temp_.ssa.def_block_matrix == nullptr);
+ temp_.ssa.def_block_matrix = static_cast<ArenaBitVector**>(
temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * num_registers, kArenaAllocDFInfo));
int i;
/* Initialize num_register vectors with num_blocks bits each */
for (i = 0; i < num_registers; i++) {
- temp_bit_matrix_[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(arena_, GetNumBlocks(),
- false, kBitMapBMatrix);
- temp_bit_matrix_[i]->ClearAllBits();
+ temp_.ssa.def_block_matrix[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ arena_, GetNumBlocks(), false, kBitMapBMatrix);
+ temp_.ssa.def_block_matrix[i]->ClearAllBits();
}
AllNodesIterator iter(this);
@@ -163,7 +163,7 @@ void MIRGraph::ComputeDefBlockMatrix() {
int num_regs = GetNumOfCodeVRs();
int in_reg = GetFirstInVR();
for (; in_reg < num_regs; in_reg++) {
- temp_bit_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
+ temp_.ssa.def_block_matrix[in_reg]->SetBit(GetEntryBlock()->id);
}
}
@@ -435,32 +435,32 @@ void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src
* insert a phi node if the variable is live-in to the block.
*/
bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
- DCHECK_EQ(temp_bit_vector_size_, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
- ArenaBitVector* temp_dalvik_register_v = temp_bit_vector_;
+ DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
+ ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;
if (bb->data_flow_info == NULL) {
return false;
}
- temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
+ temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
BasicBlock* bb_taken = GetBasicBlock(bb->taken);
BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
if (bb_taken && bb_taken->data_flow_info)
- ComputeSuccLineIn(temp_dalvik_register_v, bb_taken->data_flow_info->live_in_v,
+ ComputeSuccLineIn(temp_live_vregs, bb_taken->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
if (bb_fall_through && bb_fall_through->data_flow_info)
- ComputeSuccLineIn(temp_dalvik_register_v, bb_fall_through->data_flow_info->live_in_v,
+ ComputeSuccLineIn(temp_live_vregs, bb_fall_through->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
if (bb->successor_block_list_type != kNotUsed) {
for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
if (succ_bb->data_flow_info) {
- ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
+ ComputeSuccLineIn(temp_live_vregs, succ_bb->data_flow_info->live_in_v,
bb->data_flow_info->def_v);
}
}
}
- if (!temp_dalvik_register_v->Equal(bb->data_flow_info->live_in_v)) {
- bb->data_flow_info->live_in_v->Copy(temp_dalvik_register_v);
+ if (!temp_live_vregs->Equal(bb->data_flow_info->live_in_v)) {
+ bb->data_flow_info->live_in_v->Copy(temp_live_vregs);
return true;
}
return false;
@@ -482,7 +482,7 @@ void MIRGraph::InsertPhiNodes() {
/* Iterate through each Dalvik register */
for (dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
- input_blocks->Copy(temp_bit_matrix_[dalvik_reg]);
+ input_blocks->Copy(temp_.ssa.def_block_matrix[dalvik_reg]);
phi_blocks->ClearAllBits();
do {
// TUNING: When we repeat this, we could skip indexes from the previous pass.
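ComputeDefBlockMatrix() above builds the renamed def_block_matrix: one bit vector per virtual register recording which blocks define it, with all in-registers additionally treated as defined in the entry block. InsertPhiNodes() then consumes it in the standard iterated-dominance-frontier way. A condensed sketch of that placement for a single register, using std::set where ART uses ArenaBitVector:

    #include <set>
    #include <vector>

    // Phi placement for one variable: def_blocks = blocks defining it,
    // df[b] = dominance frontier of block b (both computed elsewhere).
    std::set<int> PlacePhis(const std::set<int>& def_blocks,
                            const std::vector<std::set<int>>& df) {
      std::set<int> phi_blocks;
      std::vector<int> work(def_blocks.begin(), def_blocks.end());
      while (!work.empty()) {
        int b = work.back();
        work.pop_back();
        for (int f : df[b]) {
          if (phi_blocks.insert(f).second) {
            work.push_back(f);  // a phi is itself a definition; iterate
          }
        }
      }
      return phi_blocks;
    }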
diff --git a/compiler/utils/arm/assembler_arm32_test.cc b/compiler/utils/arm/assembler_arm32_test.cc
index fe12a226c8..837fe1ec18 100644
--- a/compiler/utils/arm/assembler_arm32_test.cc
+++ b/compiler/utils/arm/assembler_arm32_test.cc
@@ -49,7 +49,7 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
}
std::string GetAssemblerParameters() OVERRIDE {
- return " -march=armv7ve"; // Arm-v7a with virtualization extension (means we have sdiv).
+ return " -march=armv7-a -mcpu=cortex-a15"; // Arm-v7a, cortex-a15 (means we have sdiv).
}
const char* GetAssemblyHeader() OVERRIDE {
@@ -421,6 +421,7 @@ class AssemblerArm32Test : public AssemblerArmTest<arm::Arm32Assembler,
template <typename... Args>
void GenericTemplateHelper(std::function<void(Args...)> f, bool without_pc,
std::string fmt, std::string test_name) {
+ first_ = false;
WarnOnCombinations(CountHelper<Args...>(without_pc));
std::ostringstream oss;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index feee598c94..08352de7e6 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -386,7 +386,8 @@ class OatDumper {
: oat_file_(oat_file),
oat_dex_files_(oat_file.GetOatDexFiles()),
options_(options),
- disassembler_(Disassembler::Create(oat_file_.GetOatHeader().GetInstructionSet(),
+ instruction_set_(oat_file_.GetOatHeader().GetInstructionSet()),
+ disassembler_(Disassembler::Create(instruction_set_,
new DisassemblerOptions(options_->absolute_addresses_,
oat_file.Begin(),
true /* can_read_literals_ */))) {

@@ -399,6 +400,10 @@ class OatDumper {
delete disassembler_;
}
+ InstructionSet GetInstructionSet() {
+ return instruction_set_;
+ }
+
bool Dump(std::ostream& os) {
bool success = true;
const OatHeader& oat_header = oat_file_.GetOatHeader();
@@ -515,7 +520,7 @@ class OatDumper {
return end_offset - begin_offset;
}
- InstructionSet GetInstructionSet() {
+ InstructionSet GetOatInstructionSet() {
return oat_file_.GetOatHeader().GetInstructionSet();
}
@@ -1260,6 +1265,7 @@ class OatDumper {
const OatFile& oat_file_;
const std::vector<const OatFile::OatDexFile*> oat_dex_files_;
const OatDumperOptions* options_;
+ InstructionSet instruction_set_;
std::set<uintptr_t> offsets_;
Disassembler* disassembler_;
};
@@ -1521,7 +1527,8 @@ class ImageDumper {
const void* GetQuickOatCodeBegin(mirror::ArtMethod* m)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- const void* quick_code = m->GetEntryPointFromQuickCompiledCode();
+ const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize(
+ InstructionSetPointerSize(oat_dumper_->GetOatInstructionSet()));
if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
quick_code = oat_dumper_->GetQuickOatCode(m);
}
@@ -1622,11 +1629,14 @@ class ImageDumper {
}
}
} else if (obj->IsArtMethod()) {
+ const size_t image_pointer_size = InstructionSetPointerSize(
+ state->oat_dumper_->GetOatInstructionSet());
mirror::ArtMethod* method = obj->AsArtMethod();
if (method->IsNative()) {
// TODO: portable dumping.
- DCHECK(method->GetNativeGcMap() == nullptr) << PrettyMethod(method);
- DCHECK(method->GetMappingTable() == nullptr) << PrettyMethod(method);
+ DCHECK(method->GetNativeGcMapPtrSize(image_pointer_size) == nullptr)
+ << PrettyMethod(method);
+ DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
bool first_occurrence;
const void* quick_oat_code = state->GetQuickOatCodeBegin(method);
uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
@@ -1634,33 +1644,36 @@ class ImageDumper {
if (first_occurrence) {
state->stats_.native_to_managed_code_bytes += quick_oat_code_size;
}
- if (quick_oat_code != method->GetEntryPointFromQuickCompiledCode()) {
+ if (quick_oat_code != method->GetEntryPointFromQuickCompiledCodePtrSize(
+ image_pointer_size)) {
indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code);
}
} else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
method->IsResolutionMethod() || method->IsImtConflictMethod() ||
method->IsImtUnimplementedMethod() || method->IsClassInitializer()) {
- DCHECK(method->GetNativeGcMap() == nullptr) << PrettyMethod(method);
- DCHECK(method->GetMappingTable() == nullptr) << PrettyMethod(method);
+ DCHECK(method->GetNativeGcMapPtrSize(image_pointer_size) == nullptr)
+ << PrettyMethod(method);
+ DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
} else {
const DexFile::CodeItem* code_item = method->GetCodeItem();
size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
state->stats_.dex_instruction_bytes += dex_instruction_bytes;
bool first_occurrence;
- size_t gc_map_bytes = state->ComputeOatSize(method->GetNativeGcMap(), &first_occurrence);
+ size_t gc_map_bytes = state->ComputeOatSize(
+ method->GetNativeGcMapPtrSize(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.gc_map_bytes += gc_map_bytes;
}
size_t pc_mapping_table_bytes =
- state->ComputeOatSize(method->GetMappingTable(), &first_occurrence);
+ state->ComputeOatSize(method->GetMappingTable(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
}
size_t vmap_table_bytes =
- state->ComputeOatSize(method->GetVmapTable(), &first_occurrence);
+ state->ComputeOatSize(method->GetVmapTable(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.vmap_table_bytes += vmap_table_bytes;
}
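The oatdump changes above exist because the tool can run on a host whose pointer width differs from the dumped image's target, so method data must be read with the image's pointer size, derived from the oat header's instruction set (GetOatInstructionSet() plus InstructionSetPointerSize()) rather than from sizeof(void*). A sketch of that derivation; InstructionSetPointerSize() is the real ART helper, but the enum here is trimmed to the ISAs touched by this change:

    #include <cstddef>

    enum class InstructionSet { kArm, kArm64, kX86, kX86_64, kMips };

    size_t InstructionSetPointerSize(InstructionSet isa) {
      switch (isa) {
        case InstructionSet::kArm64:
        case InstructionSet::kX86_64:
          return 8;
        default:
          return 4;  // arm, x86, mips are 32-bit targets
      }
    }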
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 58f7940eda..087c0ea990 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -390,7 +390,9 @@ define build-libart
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart
- LOCAL_FDO_SUPPORT := true
+ ifeq ($$(art_target_or_host),target)
+ LOCAL_FDO_SUPPORT := true
+ endif
else # debug
LOCAL_MODULE := libartd
endif
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 9d2d59c9a4..821d613040 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -53,7 +53,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (GetMethod()->IsOptimized()) {
+ if (GetMethod()->IsOptimized(sizeof(void*))) {
CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
} else {
CheckQuickMethod(registers, number_of_references, native_pc_offset);
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index d5493bdcaf..54dbd8c770 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -35,7 +35,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::ArtMethod*
if (instrumentation->IsDeoptimized(method)) {
result = GetQuickToInterpreterBridge();
} else {
- result = instrumentation->GetQuickCodeFor(method);
+ result = instrumentation->GetQuickCodeFor(method, sizeof(void*));
DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
}
bool interpreter_entry = (result == GetQuickToInterpreterBridge());
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index ab3ec62d69..835485c351 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -360,7 +360,8 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
// at the return PC address.
if (true || kIsDebugBuild) {
VLOG(signals) << "looking for dex pc for return pc " << std::hex << return_pc;
- const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_obj);
+ const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_obj,
+ sizeof(void*));
uint32_t sought_offset = return_pc - reinterpret_cast<uintptr_t>(code);
VLOG(signals) << "pc offset: " << std::hex << sought_offset;
}
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 003e1601ce..639b0f0766 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -869,10 +869,10 @@ void Instrumentation::DisableMethodTracing() {
ConfigureStubs(false, false);
}
-const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method) const {
+const void* Instrumentation::GetQuickCodeFor(mirror::ArtMethod* method, size_t pointer_size) const {
Runtime* runtime = Runtime::Current();
if (LIKELY(!instrumentation_stubs_installed_)) {
- const void* code = method->GetEntryPointFromQuickCompiledCode();
+ const void* code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size);
DCHECK(code != nullptr);
ClassLinker* class_linker = runtime->GetClassLinker();
if (LIKELY(!class_linker->IsQuickResolutionStub(code) &&
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 369039d39f..effa9f7b2e 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -200,7 +200,7 @@ class Instrumentation {
// Get the quick code for the given method. More efficient than asking the class linker as it
// will short-cut to GetCode if instrumentation and static method resolution stubs aren't
// installed.
- const void* GetQuickCodeFor(mirror::ArtMethod* method) const
+ const void* GetQuickCodeFor(mirror::ArtMethod* method, size_t pointer_size) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ForceInterpretOnly() {
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 62d17abaa2..b93651156e 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -199,17 +199,17 @@ inline void ArtMethod::SetPortableOatCodeOffset(uint32_t code_offset) {
SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(code_offset));
}
-inline const uint8_t* ArtMethod::GetMappingTable() {
- const void* code_pointer = GetQuickOatCodePointer();
+inline const uint8_t* ArtMethod::GetMappingTable(size_t pointer_size) {
+ const void* code_pointer = GetQuickOatCodePointer(pointer_size);
if (code_pointer == nullptr) {
return nullptr;
}
- return GetMappingTable(code_pointer);
+ return GetMappingTable(code_pointer, pointer_size);
}
-inline const uint8_t* ArtMethod::GetMappingTable(const void* code_pointer) {
+inline const uint8_t* ArtMethod::GetMappingTable(const void* code_pointer, size_t pointer_size) {
DCHECK(code_pointer != nullptr);
- DCHECK(code_pointer == GetQuickOatCodePointer());
+ DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
uint32_t offset =
reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
if (UNLIKELY(offset == 0u)) {
@@ -218,18 +218,18 @@ inline const uint8_t* ArtMethod::GetMappingTable(const void* code_pointer) {
return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
}
-inline const uint8_t* ArtMethod::GetVmapTable() {
- const void* code_pointer = GetQuickOatCodePointer();
+inline const uint8_t* ArtMethod::GetVmapTable(size_t pointer_size) {
+ const void* code_pointer = GetQuickOatCodePointer(pointer_size);
if (code_pointer == nullptr) {
return nullptr;
}
- return GetVmapTable(code_pointer);
+ return GetVmapTable(code_pointer, pointer_size);
}
-inline const uint8_t* ArtMethod::GetVmapTable(const void* code_pointer) {
- CHECK(!IsOptimized()) << "Unimplemented vmap table for optimized compiler";
+inline const uint8_t* ArtMethod::GetVmapTable(const void* code_pointer, size_t pointer_size) {
+ CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
DCHECK(code_pointer != nullptr);
- DCHECK(code_pointer == GetQuickOatCodePointer());
+ DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
uint32_t offset =
reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
if (UNLIKELY(offset == 0u)) {
@@ -243,8 +243,8 @@ inline StackMap ArtMethod::GetStackMap(uint32_t native_pc_offset) {
}
inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
- DCHECK(IsOptimized());
- const void* code_pointer = GetQuickOatCodePointer();
+ DCHECK(IsOptimized(sizeof(void*)));
+ const void* code_pointer = GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
uint32_t offset =
reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
@@ -303,13 +303,14 @@ inline bool ArtMethod::IsImtUnimplementedMethod() {
}
inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
- const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this);
+ const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(
+ this, sizeof(void*));
return pc - reinterpret_cast<uintptr_t>(code);
}
inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
DCHECK(code_pointer != nullptr);
- DCHECK_EQ(code_pointer, GetQuickOatCodePointer());
+ DCHECK_EQ(code_pointer, GetQuickOatCodePointer(sizeof(void*)));
return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
}
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 3b4d5f317d..6d4af83058 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -165,9 +165,9 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
// Portable doesn't use the machine pc, we just use dex pc instead.
return static_cast<uint32_t>(pc);
}
- const void* entry_point = GetQuickOatEntryPoint();
- MappingTable table(
- entry_point != nullptr ? GetMappingTable(EntryPointToCodePointer(entry_point)) : nullptr);
+ const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ MappingTable table(entry_point != nullptr ?
+ GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
if (table.TotalSize() == 0) {
// NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
// but they have no suspend checks and, consequently, we never call ToDexPc() for them.
@@ -198,9 +198,9 @@ uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
}
uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure) {
- const void* entry_point = GetQuickOatEntryPoint();
- MappingTable table(
- entry_point != nullptr ? GetMappingTable(EntryPointToCodePointer(entry_point)) : nullptr);
+ const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ MappingTable table(entry_point != nullptr ?
+ GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
if (table.TotalSize() == 0) {
DCHECK_EQ(dex_pc, 0U);
return 0; // Special no mapping/pc == 0 case
@@ -320,13 +320,13 @@ bool ArtMethod::IsEntrypointInterpreter() {
}
}
-const void* ArtMethod::GetQuickOatEntryPoint() {
+const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) {
if (IsPortableCompiled() || IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
return nullptr;
}
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+ const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, pointer_size);
// On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
// for non-native methods.
@@ -340,7 +340,7 @@ const void* ArtMethod::GetQuickOatEntryPoint() {
#ifndef NDEBUG
uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) {
CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
- CHECK_EQ(quick_entry_point, Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this));
+ CHECK_EQ(quick_entry_point, Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*)));
return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
}
#endif
@@ -447,7 +447,7 @@ QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
return runtime->GetRuntimeMethodFrameInfo(this);
}
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this);
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
ClassLinker* class_linker = runtime->GetClassLinker();
// On failure, instead of nullptr we get the quick-generic-jni-trampoline for native method
// indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 4a7831fd62..d292552af2 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -146,13 +146,13 @@ class MANAGED ArtMethod FINAL : public Object {
SetAccessFlags(GetAccessFlags() | kAccPreverified);
}
- bool IsOptimized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool IsOptimized(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Temporary solution for detecting if a method has been optimized: the compiler
// does not create a GC map. Instead, the vmap table contains the stack map
// (as in stack_map.h).
- return (GetEntryPointFromQuickCompiledCode() != nullptr)
- && (GetQuickOatCodePointer() != nullptr)
- && (GetNativeGcMap() == nullptr);
+ return GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
+ && GetQuickOatCodePointer(pointer_size) != nullptr
+ && GetNativeGcMapPtrSize(pointer_size) == nullptr;
}
bool IsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -381,22 +381,25 @@ class MANAGED ArtMethod FINAL : public Object {
return reinterpret_cast<const void*>(code);
}
- // Actual entry point pointer to compiled oat code or nullptr if method has no compiled code.
- const void* GetQuickOatEntryPoint() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
+ // Actual entry point pointer to compiled oat code or nullptr.
+ const void* GetQuickOatEntryPoint(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Actual pointer to compiled oat code or nullptr.
- const void* GetQuickOatCodePointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return EntryPointToCodePointer(GetQuickOatEntryPoint());
+ const void* GetQuickOatCodePointer(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
}
// Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
- const uint8_t* GetMappingTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const uint8_t* GetMappingTable(const void* code_pointer)
+ const uint8_t* GetMappingTable(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
- const uint8_t* GetVmapTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const uint8_t* GetVmapTable(const void* code_pointer)
+ const uint8_t* GetVmapTable(size_t pointer_size)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
StackMap GetStackMap(uint32_t native_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
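The pointer_size parameter threaded through IsOptimized(), GetQuickOatCodePointer(), GetMappingTable(), GetVmapTable() and friends reflects that ArtMethod's entry-point fields are target-pointer-sized, so their offset and width within the object depend on whether the image is 32-bit or 64-bit. Runtime code passes sizeof(void*), which always matches its own image; oatdump passes the target's size instead. A minimal sketch of such an accessor, with an illustrative layout rather than ART's real field offsets:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Read one entry-point slot from a method object whose tail layout
    // scales with the target pointer size. Offsets here are illustrative.
    uint64_t GetEntryPointPtrSize(const uint8_t* method, size_t ptr_size) {
      const size_t fixed_fields = 32;  // leading 32-bit fields, assumed size
      const size_t slot_index = 2;     // which entry point, assumed index
      const size_t offset = fixed_fields + slot_index * ptr_size;
      uint64_t raw = 0;
      std::memcpy(&raw, method + offset, ptr_size);  // load 4 or 8 bytes
      return raw;
    }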
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 44086096f0..43714b95e8 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -125,7 +125,7 @@ mirror::Object* StackVisitor::GetThisObject() const {
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
- } else if (m->IsOptimized()) {
+ } else if (m->IsOptimized(sizeof(void*))) {
// TODO: Implement, currently only used for exceptions when jdwp is enabled.
UNIMPLEMENTED(WARNING)
<< "StackVisitor::GetThisObject is unimplemented with the optimizing compiler";
@@ -153,9 +153,9 @@ bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer();
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
@@ -207,9 +207,9 @@ bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kin
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer();
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
@@ -254,9 +254,9 @@ bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_val
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer();
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
@@ -318,9 +318,9 @@ bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer();
+ const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index c769faf3e5..cd47b5ea6e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2136,9 +2136,9 @@ class ReferenceMapVisitor : public StackVisitor {
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
- if (m->IsOptimized()) {
+ if (m->IsOptimized(sizeof(void*))) {
Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m);
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
StackMap map = m->GetStackMap(native_pc_offset);
MemoryRegion mask = map.GetStackMask();
@@ -2167,12 +2167,12 @@ class ReferenceMapVisitor : public StackVisitor {
static_cast<size_t>(code_item->registers_size_));
if (num_regs > 0) {
Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m);
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
const void* code_pointer = mirror::ArtMethod::EntryPointToCodePointer(entry_point);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
// For all dex registers in the bitmap
DCHECK(cur_quick_frame != nullptr);