summaryrefslogtreecommitdiff
path: root/compiler
diff options
context:
space:
mode:
author Vladimir Marko <vmarko@google.com> 2015-03-20 12:47:30 +0000
committer Vladimir Marko <vmarko@google.com> 2015-06-03 20:10:12 +0100
commit 7cc8f9aa1349fd6cb0814a653ee2d1164a7fb9f7 (patch)
tree 4465d4783765591adf63a5cb62091afbebefdcba /compiler
parent 3c683c5d107cb43e8027d740ed579ef92a88af2e (diff)
Quick: Create GC map based on compiler data. DO NOT MERGE
The Quick compiler and verifier sometimes disagree on dalvik register types (fp/core/ref) for 0/null constants and merged registers involving 0/null constants. Since the verifier is more lenient it can mark a register as a reference for GC where Quick considers it a floating point register or a dead register (which would have a ref/fp conflict if not dead). If the compiler used an fp register to hold the zero value, the core register or stack location used by GC based on the verifier data can hold an invalid value. Previously, as a workaround we stored the fp zero value also in the stack location or core register where GC would look for it. This wasn't precise and may have missed some cases. To fix this properly, we now generate GC maps based on the compiler's notion of references if register promotion is enabled. Bug: https://code.google.com/p/android/issues/detail?id=147187 (cherry picked from commit 767c752fddc64e280dba507457e4f06002b5f678) Change-Id: Id75428fd0a2f6bdd2ccb20ce75cdeab01150e455
Diffstat (limited to 'compiler')
-rw-r--r-- compiler/dex/quick/codegen_util.cc 160
-rw-r--r-- compiler/dex/quick/gen_common.cc 3
-rw-r--r-- compiler/dex/quick/gen_loadstore.cc 40
-rw-r--r-- compiler/dex/quick/mir_to_lir.cc 3
-rw-r--r-- compiler/dex/quick/mir_to_lir.h 21
5 files changed, 181 insertions, 46 deletions
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index cb5fbb3d0c..94710d1bd2 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/bit_vector.h"
#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
@@ -84,6 +85,8 @@ void Mir2Lir::MarkSafepointPC(LIR* inst) {
inst->u.m.def_mask = &kEncodeAll;
LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
+ DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
+ safepoints_.emplace_back(safepoint_pc, current_mir_);
}
void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
@@ -98,6 +101,8 @@ void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
InsertLIRAfter(after, safepoint_pc);
}
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
+ DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
+ safepoints_.emplace_back(safepoint_pc, current_mir_);
}
/* Remove a LIR from the list. */
@@ -746,6 +751,61 @@ void Mir2Lir::CreateMappingTables() {
}
void Mir2Lir::CreateNativeGcMap() {
+ if (UNLIKELY((cu_->disable_opt & (1u << kPromoteRegs)) != 0u)) {
+ // If we're not promoting to physical registers, it's safe to use the verifier's notion of
+ // references. (We disable register promotion when type inference finds a type conflict and
+ // in that case we defer to the verifier to avoid using the compiler's conflicting info.)
+ CreateNativeGcMapWithoutRegisterPromotion();
+ return;
+ }
+
+ ArenaBitVector* references = new (arena_) ArenaBitVector(arena_, mir_graph_->GetNumSSARegs(),
+ false);
+
+ // Calculate max native offset and max reference vreg.
+ MIR* prev_mir = nullptr;
+ int max_ref_vreg = -1;
+ CodeOffset max_native_offset = 0u;
+ for (const auto& entry : safepoints_) {
+ uint32_t native_offset = entry.first->offset;
+ max_native_offset = std::max(max_native_offset, native_offset);
+ MIR* mir = entry.second;
+ UpdateReferenceVRegs(mir, prev_mir, references);
+ max_ref_vreg = std::max(max_ref_vreg, references->GetHighestBitSet());
+ prev_mir = mir;
+ }
+
+ // Build the GC map.
+ uint32_t reg_width = static_cast<uint32_t>((max_ref_vreg + 8) / 8);
+ GcMapBuilder native_gc_map_builder(&native_gc_map_,
+ safepoints_.size(),
+ max_native_offset, reg_width);
+#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN)
+ ArenaVector<uint8_t> references_buffer(arena_->Adapter());
+ references_buffer.resize(reg_width);
+#endif
+ for (const auto& entry : safepoints_) {
+ uint32_t native_offset = entry.first->offset;
+ MIR* mir = entry.second;
+ UpdateReferenceVRegs(mir, prev_mir, references);
+#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN)
+ // Big-endian or unknown endianness, manually translate the bit vector data.
+ const auto* raw_storage = references->GetRawStorage();
+ for (size_t i = 0; i != reg_width; ++i) {
+ references_buffer[i] = static_cast<uint8_t>(
+ raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
+ }
+ native_gc_map_builder.AddEntry(native_offset, &references_buffer[0]);
+#else
+ // For little-endian, the bytes comprising the bit vector's raw storage are what we need.
+ native_gc_map_builder.AddEntry(native_offset,
+ reinterpret_cast<const uint8_t*>(references->GetRawStorage()));
+#endif
+ prev_mir = mir;
+ }
+}
+
+void Mir2Lir::CreateNativeGcMapWithoutRegisterPromotion() {
DCHECK(!encoded_mapping_table_.empty());
MappingTable mapping_table(&encoded_mapping_table_[0]);
uint32_t max_native_offset = 0;
@@ -1007,6 +1067,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
block_label_list_(NULL),
promotion_map_(NULL),
current_dalvik_offset_(0),
+ current_mir_(nullptr),
estimated_native_code_size_(0),
reg_pool_(NULL),
live_sreg_(0),
@@ -1021,7 +1082,8 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
last_lir_insn_(NULL),
slow_paths_(arena, 32, kGrowableArraySlowPaths),
mem_ref_type_(ResourceMask::kHeapRef),
- mask_cache_(arena) {
+ mask_cache_(arena),
+ safepoints_(arena->Adapter()) {
// Reserve pointer id 0 for NULL.
size_t null_idx = WrapPointer(NULL);
DCHECK_EQ(null_idx, 0U);
@@ -1312,4 +1374,100 @@ void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
}
+void Mir2Lir::InitReferenceVRegs(BasicBlock* bb, BitVector* references) {
+ // Mark the references coming from the first predecessor.
+ DCHECK(bb != nullptr);
+ DCHECK(bb->block_type == kEntryBlock || bb->predecessors->Size() != 0u);
+ BasicBlock* first_bb =
+ (bb->block_type == kEntryBlock) ? bb : mir_graph_->GetBasicBlock(bb->predecessors->Get(0));
+ DCHECK(first_bb != nullptr);
+ DCHECK(first_bb->data_flow_info != nullptr);
+ DCHECK(first_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ const int32_t* first_vreg_to_ssa_map = first_bb->data_flow_info->vreg_to_ssa_map_exit;
+ references->ClearAllBits();
+ for (uint32_t vreg = 0, num_vregs = cu_->num_regs + cu_->num_ins; vreg != num_vregs; ++vreg) {
+ int32_t sreg = first_vreg_to_ssa_map[vreg];
+ if (sreg != INVALID_SREG && mir_graph_->reg_location_[sreg].ref &&
+ !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[sreg])) {
+ references->SetBit(vreg);
+ }
+ }
+ // Unmark the references that are merging with a different value.
+ for (size_t i = 1u, num_pred = bb->predecessors->Size(); i < num_pred; ++i) {
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(bb->predecessors->Get(i));
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
+ const int32_t* pred_vreg_to_ssa_map = pred_bb->data_flow_info->vreg_to_ssa_map_exit;
+ for (uint32_t vreg : references->Indexes()) {
+ if (first_vreg_to_ssa_map[vreg] != pred_vreg_to_ssa_map[vreg]) {
+ // NOTE: The BitVectorSet::IndexIterator will not check the pointed-to bit again,
+ // so clearing the bit has no effect on the iterator.
+ references->ClearBit(vreg);
+ }
+ }
+ }
+ if (bb->block_type != kEntryBlock && bb->first_mir_insn != nullptr &&
+ static_cast<int>(bb->first_mir_insn->dalvikInsn.opcode) == kMirOpCheckPart2) {
+ // In Mir2Lir::MethodBlockCodeGen() we have artificially moved the throwing
+ // instruction to the previous block. However, the MIRGraph data used above
+ // doesn't reflect that, so we still need to process that MIR insn here.
+ DCHECK_EQ(bb->predecessors->Size(), 1u);
+ BasicBlock* pred_bb = mir_graph_->GetBasicBlock(bb->predecessors->Get(0));
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->last_mir_insn != nullptr);
+ UpdateReferenceVRegsLocal(nullptr, pred_bb->last_mir_insn, references);
+ }
+}
+
+bool Mir2Lir::UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references) {
+ DCHECK(mir == nullptr || mir->bb == prev_mir->bb);
+ DCHECK(prev_mir != nullptr);
+ while (prev_mir != nullptr) {
+ if (prev_mir == mir) {
+ return true;
+ }
+ const size_t num_defs = prev_mir->ssa_rep->num_defs;
+ const int32_t* defs = prev_mir->ssa_rep->defs;
+ if (num_defs == 1u && mir_graph_->reg_location_[defs[0]].ref &&
+ !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[defs[0]])) {
+ references->SetBit(mir_graph_->SRegToVReg(defs[0]));
+ } else {
+ for (size_t i = 0u; i != num_defs; ++i) {
+ references->ClearBit(mir_graph_->SRegToVReg(defs[i]));
+ }
+ }
+ prev_mir = prev_mir->next;
+ }
+ return false;
+}
+
+void Mir2Lir::UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references) {
+ if (mir == nullptr) {
+ // Safepoint in entry sequence.
+ InitReferenceVRegs(mir_graph_->GetEntryBlock(), references);
+ return;
+ }
+ if (mir->dalvikInsn.opcode == Instruction::RETURN_VOID ||
+ mir->dalvikInsn.opcode == Instruction::RETURN ||
+ mir->dalvikInsn.opcode == Instruction::RETURN_WIDE ||
+ mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT ||
+ mir->dalvikInsn.opcode == Instruction::RETURN_VOID_BARRIER) {
+ references->ClearAllBits();
+ if (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT) {
+ references->SetBit(mir_graph_->SRegToVReg(mir->ssa_rep->uses[0]));
+ }
+ return;
+ }
+ if (prev_mir != nullptr && mir->bb == prev_mir->bb &&
+ UpdateReferenceVRegsLocal(mir, prev_mir, references)) {
+ return;
+ }
+ BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
+ DCHECK(bb != nullptr);
+ InitReferenceVRegs(bb, references);
+ bool success = UpdateReferenceVRegsLocal(mir, bb->first_mir_insn, references);
+ DCHECK(success) << "MIR @0x" << std::hex << mir->offset << " not in BB#" << std::dec << mir->bb;
+}
+
} // namespace art
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index d451401af0..b7ed4906c8 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1935,9 +1935,6 @@ void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
LoadConstantNoClobber(rl_result.reg, value);
StoreValue(rl_dest, rl_result);
- if (value == 0) {
- Workaround7250540(rl_dest, rl_result.reg);
- }
}
void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index e5798fdc0b..8d0c9f4a88 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -36,46 +36,6 @@ LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) {
}
/*
- * Temporary workaround for Issue 7250540. If we're loading a constant zero into a
- * promoted floating point register, also copy a zero into the int/ref identity of
- * that sreg.
- */
-void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
- if (rl_dest.fp) {
- int pmap_index = SRegToPMap(rl_dest.s_reg_low);
- if (promotion_map_[pmap_index].fp_location == kLocPhysReg) {
- // Now, determine if this vreg is ever used as a reference. If not, we're done.
- bool used_as_reference = false;
- int base_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
- for (int i = 0; !used_as_reference && (i < mir_graph_->GetNumSSARegs()); i++) {
- if (mir_graph_->SRegToVReg(mir_graph_->reg_location_[i].s_reg_low) == base_vreg) {
- used_as_reference |= mir_graph_->reg_location_[i].ref;
- }
- }
- if (!used_as_reference) {
- return;
- }
- RegStorage temp_reg = zero_reg;
- if (!temp_reg.Valid()) {
- temp_reg = AllocTemp();
- LoadConstant(temp_reg, 0);
- }
- if (promotion_map_[pmap_index].core_location == kLocPhysReg) {
- // Promoted - just copy in a zero
- OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg);
- } else {
- // Lives in the frame, need to store.
- ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32, kNotVolatile);
- }
- if (!zero_reg.Valid()) {
- FreeTemp(temp_reg);
- }
- }
- }
-}
-
-/*
* Load a Dalvik register into a physical register. Take care when
* using this routine, as it doesn't perform any bookkeeping regarding
* register liveness. That is the responsibility of the caller.
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 6e0fe02af5..dcfc8bca69 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -319,6 +319,7 @@ bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) {
bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
DCHECK(special.flags & kInlineSpecial);
current_dalvik_offset_ = mir->offset;
+ DCHECK(current_mir_ == nullptr); // Safepoints attributed to prologue.
MIR* return_mir = nullptr;
bool successful = false;
@@ -1163,6 +1164,7 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
}
current_dalvik_offset_ = mir->offset;
+ current_mir_ = mir;
int opcode = mir->dalvikInsn.opcode;
GenPrintLabel(mir);
@@ -1265,6 +1267,7 @@ void Mir2Lir::MethodMIR2LIR() {
LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel(int opcode) {
m2l_->SetCurrentDexPc(current_dex_pc_);
+ m2l_->current_mir_ = current_mir_;
LIR* target = m2l_->NewLIR0(opcode);
fromfast_->target = target;
return target;
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index bfd7860684..eadda85687 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -139,6 +139,7 @@ typedef uint32_t CodeOffset; // Native code offset in bytes.
#endif
struct BasicBlock;
+class BitVector;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
@@ -522,7 +523,8 @@ class Mir2Lir : public Backend {
public:
LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
LIR* cont = nullptr) :
- m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
+ m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), current_mir_(m2l->current_mir_),
+ fromfast_(fromfast), cont_(cont) {
m2l->StartSlowPath(this);
}
virtual ~LIRSlowPath() {}
@@ -546,6 +548,7 @@ class Mir2Lir : public Backend {
Mir2Lir* const m2l_;
CompilationUnit* const cu_;
const DexOffset current_dex_pc_;
+ MIR* current_mir_;
LIR* const fromfast_;
LIR* const cont_;
};
@@ -703,6 +706,7 @@ class Mir2Lir : public Backend {
bool VerifyCatchEntries();
void CreateMappingTables();
void CreateNativeGcMap();
+ void CreateNativeGcMapWithoutRegisterPromotion();
int AssignLiteralOffset(CodeOffset offset);
int AssignSwitchTablesOffset(CodeOffset offset);
int AssignFillArrayDataOffset(CodeOffset offset);
@@ -1675,6 +1679,16 @@ class Mir2Lir : public Backend {
// See CheckRegLocationImpl.
void CheckRegLocation(RegLocation rl) const;
+ // Find the references at the beginning of a basic block (for generating GC maps).
+ void InitReferenceVRegs(BasicBlock* bb, BitVector* references);
+
+ // Update references from prev_mir to mir in the same BB. If mir is null or before
+ // prev_mir, report failure (return false) and update references to the end of the BB.
+ bool UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references);
+
+ // Update references from prev_mir to mir.
+ void UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references);
+
public:
// TODO: add accessors for these.
LIR* literal_list_; // Constants.
@@ -1692,7 +1706,6 @@ class Mir2Lir : public Backend {
GrowableArray<RegisterInfo*> tempreg_info_;
GrowableArray<RegisterInfo*> reginfo_map_;
GrowableArray<void*> pointer_storage_;
- CodeOffset current_code_offset_; // Working byte offset of machine instructons.
CodeOffset data_offset_; // starting offset of literal pool.
size_t total_size_; // header + code size.
LIR* block_label_list_;
@@ -1707,6 +1720,7 @@ class Mir2Lir : public Backend {
* The low-level LIR creation utilites will pull it from here. Rework this.
*/
DexOffset current_dalvik_offset_;
+ MIR* current_mir_;
size_t estimated_native_code_size_; // Just an estimate; used to reserve code_buffer_ size.
RegisterPool* reg_pool_;
/*
@@ -1741,6 +1755,9 @@ class Mir2Lir : public Backend {
// (i.e. 8 bytes on 32-bit arch, 16 bytes on 64-bit arch) and we use ResourceMaskCache
// to deduplicate the masks.
ResourceMaskCache mask_cache_;
+
+ // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
+ ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
}; // Class Mir2Lir
} // namespace art