author Vladimir Marko <vmarko@google.com> 2015-03-24 11:26:17 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2015-03-24 11:26:17 +0000
commit  274395fe6649d83d723c4b912a46291b2987efd6
tree    b363f83e2190e05f8c37ddb37c7d5f24ea9b3c8a
parent  bce0855ca1dbb1fa226c5b6a81760272ce0b64ef
parent  767c752fddc64e280dba507457e4f06002b5f678
Merge "Quick: Create GC map based on compiler data."
Diffstat (limited to 'compiler/dex/quick/codegen_util.cc')
-rw-r--r--  compiler/dex/quick/codegen_util.cc  155
1 file changed, 155 insertions(+), 0 deletions(-)
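
For orientation: the merged change teaches the Quick backend to emit the native GC map from its own liveness data, recording a (safepoint LIR, MIR) pair at every safepoint during lowering and replaying reference liveness when the map is built; the verifier-based path is kept as CreateNativeGcMapWithoutRegisterPromotion() for the no-promotion case. A rough sketch of the sizing pass the new CreateNativeGcMap() runs first, using a hypothetical Safepoint stand-in instead of the real LIR/MIR pair:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for one recorded safepoint: the native offset of the
// safepoint LIR and the highest vreg holding a reference at that point.
struct Safepoint {
  uint32_t native_offset;
  int max_ref_vreg;  // -1 if no references are live
};

// Sizing pass: find the largest native offset and the number of bytes needed
// per entry, mirroring what the diff's CreateNativeGcMap() computes before
// emitting any entries.
void SizeGcMap(const std::vector<Safepoint>& safepoints,
               uint32_t* max_native_offset, uint32_t* reg_width) {
  int max_ref_vreg = -1;
  *max_native_offset = 0u;
  for (const Safepoint& sp : safepoints) {
    *max_native_offset = std::max(*max_native_offset, sp.native_offset);
    max_ref_vreg = std::max(max_ref_vreg, sp.max_ref_vreg);
  }
  // Same rounding as the diff: (max_ref_vreg + 8) / 8 bytes cover bits
  // 0..max_ref_vreg, and a method with no live references gets width 0.
  *reg_width = static_cast<uint32_t>((max_ref_vreg + 8) / 8);
}

The second pass then emits one reg_width-byte bitmap per safepoint, keyed by its native offset.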
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 029c0ca8c0..3285195b40 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -16,6 +16,7 @@
#include "mir_to_lir-inl.h"
+#include "base/bit_vector-inl.h"
#include "dex/mir_graph.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -88,6 +89,8 @@ void Mir2Lir::MarkSafepointPC(LIR* inst) {
inst->u.m.def_mask = &kEncodeAll;
LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
+ DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
+ safepoints_.emplace_back(safepoint_pc, current_mir_);
}
void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
@@ -102,6 +105,8 @@ void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
InsertLIRAfter(after, safepoint_pc);
}
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
+ DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
+ safepoints_.emplace_back(safepoint_pc, current_mir_);
}
/* Remove a LIR from the list. */
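
Note: both MarkSafepointPC() and MarkSafepointPCAfter() now pair the safepoint pseudo-LIR with current_mir_, the MIR being lowered, so CreateNativeGcMap() can later recompute which vregs hold references at that point. A minimal stand-alone sketch of that recording pattern, with simplified LIR/MIR stand-ins rather than the real mir_to_lir.h types:

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

struct MIR;                           // stand-in forward declaration
struct LIR { uint32_t offset = 0; };  // native offset of the safepoint pseudo-instruction

class SafepointRecorder {
 public:
  // Called as code generation advances to a new MIR (mirrors current_mir_).
  void SetCurrentMir(MIR* mir) { current_mir_ = mir; }

  // Mirrors the diff's bookkeeping: the only time current_mir_ may be null is
  // the method entry sequence, before any safepoint has been recorded.
  void MarkSafepoint(LIR* safepoint_pc) {
    assert(current_mir_ != nullptr || safepoints_.empty());
    safepoints_.emplace_back(safepoint_pc, current_mir_);
  }

 private:
  MIR* current_mir_ = nullptr;
  std::vector<std::pair<LIR*, MIR*>> safepoints_;
};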
@@ -767,6 +772,61 @@ void Mir2Lir::CreateMappingTables() {
}
void Mir2Lir::CreateNativeGcMap() {
+ if (UNLIKELY((cu_->disable_opt & (1u << kPromoteRegs)) != 0u)) {
+ // If we're not promoting to physical registers, it's safe to use the verifier's notion of
+ // references. (We disable register promotion when type inference finds a type conflict and
+ // in that case we defer to the verifier to avoid using the compiler's conflicting info.)
+ CreateNativeGcMapWithoutRegisterPromotion();
+ return;
+ }
+
+ ArenaBitVector* references = new (arena_) ArenaBitVector(arena_, mir_graph_->GetNumSSARegs(),
+ false);
+
+ // Calculate max native offset and max reference vreg.
+ MIR* prev_mir = nullptr;
+ int max_ref_vreg = -1;
+ CodeOffset max_native_offset = 0u;
+ for (const auto& entry : safepoints_) {
+ uint32_t native_offset = entry.first->offset;
+ max_native_offset = std::max(max_native_offset, native_offset);
+ MIR* mir = entry.second;
+ UpdateReferenceVRegs(mir, prev_mir, references);
+ max_ref_vreg = std::max(max_ref_vreg, references->GetHighestBitSet());
+ prev_mir = mir;
+ }
+
+ // Build the GC map.
+ uint32_t reg_width = static_cast<uint32_t>((max_ref_vreg + 8) / 8);
+ GcMapBuilder native_gc_map_builder(&native_gc_map_,
+ safepoints_.size(),
+ max_native_offset, reg_width);
+#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN)
+ ArenaVector<uint8_t> references_buffer(arena_->Adapter());
+ references_buffer.resize(reg_width);
+#endif
+ for (const auto& entry : safepoints_) {
+ uint32_t native_offset = entry.first->offset;
+ MIR* mir = entry.second;
+ UpdateReferenceVRegs(mir, prev_mir, references);
+#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN)
+ // Big-endian or unknown endianness, manually translate the bit vector data.
+ const auto* raw_storage = references->GetRawStorage();
+ for (size_t i = 0; i != reg_width; ++i) {
+ references_buffer[i] = static_cast<uint8_t>(
+ raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
+ }
+ native_gc_map_builder.AddEntry(native_offset, &references_buffer[0]);
+#else
+ // For little-endian, the bytes comprising the bit vector's raw storage are what we need.
+ native_gc_map_builder.AddEntry(native_offset,
+ reinterpret_cast<const uint8_t*>(references->GetRawStorage()));
+#endif
+ prev_mir = mir;
+ }
+}
+
+void Mir2Lir::CreateNativeGcMapWithoutRegisterPromotion() {
DCHECK(!encoded_mapping_table_.empty());
MappingTable mapping_table(&encoded_mapping_table_[0]);
uint32_t max_native_offset = 0;
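
On a big-endian (or unknown-endianness) host the loop above cannot hand the bit vector's word storage to the map builder directly, so it serializes it byte by byte into little-endian order. A self-contained sketch of that translation, where raw_storage is a plain uint32_t array standing in for BitVector::GetRawStorage():

#include <cstddef>
#include <cstdint>
#include <vector>

// Serialize the bit vector's word storage into little-endian byte order,
// one byte covering eight vregs, as the diff's big-endian branch does.
std::vector<uint8_t> ToLittleEndianBytes(const uint32_t* raw_storage, size_t reg_width) {
  std::vector<uint8_t> bytes(reg_width);
  for (size_t i = 0; i != reg_width; ++i) {
    bytes[i] = static_cast<uint8_t>(
        raw_storage[i / sizeof(uint32_t)] >> (8u * (i % sizeof(uint32_t))));
  }
  return bytes;
}

On a little-endian host the diff skips this copy and passes the raw storage pointer straight to the GcMapBuilder.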
@@ -965,6 +1025,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
block_label_list_(nullptr),
promotion_map_(nullptr),
current_dalvik_offset_(0),
+ current_mir_(nullptr),
estimated_native_code_size_(0),
reg_pool_(nullptr),
live_sreg_(0),
@@ -984,6 +1045,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
mem_ref_type_(ResourceMask::kHeapRef),
mask_cache_(arena),
+ safepoints_(arena->Adapter()),
in_to_reg_storage_mapping_(arena) {
switch_tables_.reserve(4);
fill_array_data_.reserve(4);
@@ -1274,4 +1336,97 @@ void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
UNREACHABLE();
}
+void Mir2Lir::InitReferenceVRegs(BasicBlock* bb, BitVector* references) {
+ // Mark the references coming from the first predecessor.
+ DCHECK(bb != nullptr);
+ DCHECK(bb->block_type == kEntryBlock || !bb->predecessors.empty());
+ BasicBlock* first_bb =
+ (bb->block_type == kEntryBlock) ? bb : mir_graph_->GetBasicBlock(bb->predecessors[0]);
+ DCHECK(first_bb != nullptr);
+ DCHECK(first_bb->data_flow_info != nullptr);
+ DCHECK(first_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ const int32_t* first_vreg_to_ssa_map = first_bb->data_flow_info->vreg_to_ssa_map_exit;
+ references->ClearAllBits();
+ for (uint32_t vreg = 0, num_vregs = mir_graph_->GetNumOfCodeVRs(); vreg != num_vregs; ++vreg) {
+ int32_t sreg = first_vreg_to_ssa_map[vreg];
+ if (sreg != INVALID_SREG && mir_graph_->reg_location_[sreg].ref &&
+ !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[sreg])) {
+ references->SetBit(vreg);
+ }
+ }
+ // Unmark the references that are merging with a different value.
+ for (size_t i = 1u, num_pred = bb->predecessors.size(); i < num_pred; ++i) {
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(bb->predecessors[i]);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ const int32_t* pred_vreg_to_ssa_map = pred_bb->data_flow_info->vreg_to_ssa_map_exit;
+ for (uint32_t vreg : references->Indexes()) {
+ if (first_vreg_to_ssa_map[vreg] != pred_vreg_to_ssa_map[vreg]) {
+ // NOTE: The BitVectorSet::IndexIterator will not check the pointed-to bit again,
+ // so clearing the bit has no effect on the iterator.
+ references->ClearBit(vreg);
+ }
+ }
+ }
+ if (bb->block_type != kEntryBlock && bb->first_mir_insn != nullptr &&
+ static_cast<int>(bb->first_mir_insn->dalvikInsn.opcode) == kMirOpCheckPart2) {
+ // In Mir2Lir::MethodBlockCodeGen() we have artificially moved the throwing
+ // instruction to the previous block. However, the MIRGraph data used above
+ // doesn't reflect that, so we still need to process that MIR insn here.
+ DCHECK_EQ(bb->predecessors.size(), 1u);
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(bb->predecessors[0]);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->last_mir_insn != nullptr);
+ UpdateReferenceVRegsLocal(nullptr, pred_bb->last_mir_insn, references);
+ }
+}
+
+bool Mir2Lir::UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references) {
+ DCHECK(mir == nullptr || mir->bb == prev_mir->bb);
+ DCHECK(prev_mir != nullptr);
+ while (prev_mir != nullptr) {
+ if (prev_mir == mir) {
+ return true;
+ }
+ const size_t num_defs = prev_mir->ssa_rep->num_defs;
+ const int32_t* defs = prev_mir->ssa_rep->defs;
+ if (num_defs == 1u && mir_graph_->reg_location_[defs[0]].ref &&
+ !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[defs[0]])) {
+ references->SetBit(mir_graph_->SRegToVReg(defs[0]));
+ } else {
+ for (size_t i = 0u; i != num_defs; ++i) {
+ references->ClearBit(mir_graph_->SRegToVReg(defs[i]));
+ }
+ }
+ prev_mir = prev_mir->next;
+ }
+ return false;
+}
+
+void Mir2Lir::UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references) {
+ if (mir == nullptr) {
+ // Safepoint in entry sequence.
+ InitReferenceVRegs(mir_graph_->GetEntryBlock(), references);
+ return;
+ }
+ if (IsInstructionReturn(mir->dalvikInsn.opcode) ||
+ mir->dalvikInsn.opcode == Instruction::RETURN_VOID_NO_BARRIER) {
+ references->ClearAllBits();
+ if (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT) {
+ references->SetBit(mir_graph_->SRegToVReg(mir->ssa_rep->uses[0]));
+ }
+ return;
+ }
+ if (prev_mir != nullptr && mir->bb == prev_mir->bb &&
+ UpdateReferenceVRegsLocal(mir, prev_mir, references)) {
+ return;
+ }
+ BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
+ DCHECK(bb != nullptr);
+ InitReferenceVRegs(bb, references);
+ bool success = UpdateReferenceVRegsLocal(mir, bb->first_mir_insn, references);
+ DCHECK(success) << "MIR @0x" << std::hex << mir->offset << " not in BB#" << std::dec << mir->bb;
+}
+
} // namespace art
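
The reference-tracking helpers above re-derive, per safepoint, which Dalvik vregs hold object references: InitReferenceVRegs() seeds the set from the SSA state at block entry (clearing any vreg on which the predecessors disagree), and UpdateReferenceVRegsLocal() replays the defs between the block head and the safepoint's MIR. A hedged sketch of that forward replay, with a hypothetical SimpleMir type in place of ART's MIR/ssa_rep structures:

#include <set>

// Hypothetical simplified instruction: at most one definition per MIR, with a
// flag saying whether the defined vreg holds an object reference. Stand-in for
// the ssa_rep/reg_location_ lookups in the diff.
struct SimpleMir {
  int def_vreg = -1;       // -1: no definition
  bool def_is_reference = false;
  SimpleMir* next = nullptr;
};

// Walk forward from 'start' up to (but not including) 'target', updating the
// set of vregs that currently hold references, mirroring
// Mir2Lir::UpdateReferenceVRegsLocal(). Returns false if 'target' was not
// found, in which case the caller re-seeds from the block entry state.
bool UpdateReferences(SimpleMir* start, SimpleMir* target, std::set<int>* refs) {
  for (SimpleMir* mir = start; mir != nullptr; mir = mir->next) {
    if (mir == target) {
      return true;
    }
    if (mir->def_vreg >= 0) {
      if (mir->def_is_reference) {
        refs->insert(mir->def_vreg);
      } else {
        refs->erase(mir->def_vreg);
      }
    }
  }
  return false;
}

A def of a non-reference (or, in the diff, of a constant null) clears the bit, so a stale reference bit never survives reuse of the vreg.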