author Vladimir Marko <vmarko@google.com> 2015-06-04 09:47:47 +0000
committer Vladimir Marko <vmarko@google.com> 2015-06-04 09:47:47 +0000
commit b7fd412dd21eb362931b3a0716c94fd189a66295 (patch)
tree   7c9e7ee5296fdac1e1966f6d00d6782c4a990cfb /compiler
parent 7cc8f9aa1349fd6cb0814a653ee2d1164a7fb9f7 (diff)
Revert "Quick: Create GC map based on compiler data. DO NOT MERGE"
This reverts commit 7cc8f9aa1349fd6cb0814a653ee2d1164a7fb9f7.

Change-Id: Iadb4462bf8e834c6a847c01ee6eb332a325de22c
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/quick/codegen_util.cc  | 160
-rw-r--r--  compiler/dex/quick/gen_common.cc    |   3
-rw-r--r--  compiler/dex/quick/gen_loadstore.cc |  40
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc    |   3
-rw-r--r--  compiler/dex/quick/mir_to_lir.h     |  21
5 files changed, 46 insertions(+), 181 deletions(-)
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 94710d1bd2..cb5fbb3d0c 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include "base/bit_vector.h"
#include "dex/compiler_internals.h"
#include "dex_file-inl.h"
#include "gc_map.h"
@@ -85,8 +84,6 @@ void Mir2Lir::MarkSafepointPC(LIR* inst) {
inst->u.m.def_mask = &kEncodeAll;
LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
- DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
- safepoints_.emplace_back(safepoint_pc, current_mir_);
}
void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
@@ -101,8 +98,6 @@ void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
InsertLIRAfter(after, safepoint_pc);
}
DCHECK(safepoint_pc->u.m.def_mask->Equals(kEncodeAll));
- DCHECK(current_mir_ != nullptr || (current_dalvik_offset_ == 0 && safepoints_.empty()));
- safepoints_.emplace_back(safepoint_pc, current_mir_);
}
/* Remove a LIR from the list. */
@@ -751,61 +746,6 @@ void Mir2Lir::CreateMappingTables() {
}
void Mir2Lir::CreateNativeGcMap() {
- if (UNLIKELY((cu_->disable_opt & (1u << kPromoteRegs)) != 0u)) {
- // If we're not promoting to physical registers, it's safe to use the verifier's notion of
- // references. (We disable register promotion when type inference finds a type conflict and
- // in that case we defer to the verifier to avoid using the compiler's conflicting info.)
- CreateNativeGcMapWithoutRegisterPromotion();
- return;
- }
-
- ArenaBitVector* references = new (arena_) ArenaBitVector(arena_, mir_graph_->GetNumSSARegs(),
- false);
-
- // Calculate max native offset and max reference vreg.
- MIR* prev_mir = nullptr;
- int max_ref_vreg = -1;
- CodeOffset max_native_offset = 0u;
- for (const auto& entry : safepoints_) {
- uint32_t native_offset = entry.first->offset;
- max_native_offset = std::max(max_native_offset, native_offset);
- MIR* mir = entry.second;
- UpdateReferenceVRegs(mir, prev_mir, references);
- max_ref_vreg = std::max(max_ref_vreg, references->GetHighestBitSet());
- prev_mir = mir;
- }
-
- // Build the GC map.
- uint32_t reg_width = static_cast<uint32_t>((max_ref_vreg + 8) / 8);
- GcMapBuilder native_gc_map_builder(&native_gc_map_,
- safepoints_.size(),
- max_native_offset, reg_width);
-#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN)
- ArenaVector<uint8_t> references_buffer(arena_->Adapter());
- references_buffer.resize(reg_width);
-#endif
- for (const auto& entry : safepoints_) {
- uint32_t native_offset = entry.first->offset;
- MIR* mir = entry.second;
- UpdateReferenceVRegs(mir, prev_mir, references);
-#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN)
- // Big-endian or unknown endianness, manually translate the bit vector data.
- const auto* raw_storage = references->GetRawStorage();
- for (size_t i = 0; i != reg_width; ++i) {
- references_buffer[i] = static_cast<uint8_t>(
- raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
- }
- native_gc_map_builder.AddEntry(native_offset, &references_buffer[0]);
-#else
- // For little-endian, the bytes comprising the bit vector's raw storage are what we need.
- native_gc_map_builder.AddEntry(native_offset,
- reinterpret_cast<const uint8_t*>(references->GetRawStorage()));
-#endif
- prev_mir = mir;
- }
-}
-
-void Mir2Lir::CreateNativeGcMapWithoutRegisterPromotion() {
DCHECK(!encoded_mapping_table_.empty());
MappingTable mapping_table(&encoded_mapping_table_[0]);
uint32_t max_native_offset = 0;
@@ -1067,7 +1007,6 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
block_label_list_(NULL),
promotion_map_(NULL),
current_dalvik_offset_(0),
- current_mir_(nullptr),
estimated_native_code_size_(0),
reg_pool_(NULL),
live_sreg_(0),
@@ -1082,8 +1021,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
last_lir_insn_(NULL),
slow_paths_(arena, 32, kGrowableArraySlowPaths),
mem_ref_type_(ResourceMask::kHeapRef),
- mask_cache_(arena),
- safepoints_(arena->Adapter()) {
+ mask_cache_(arena) {
// Reserve pointer id 0 for NULL.
size_t null_idx = WrapPointer(NULL);
DCHECK_EQ(null_idx, 0U);
@@ -1374,100 +1312,4 @@ void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
}
-void Mir2Lir::InitReferenceVRegs(BasicBlock* bb, BitVector* references) {
- // Mark the references coming from the first predecessor.
- DCHECK(bb != nullptr);
- DCHECK(bb->block_type == kEntryBlock || bb->predecessors->Size() != 0u);
- BasicBlock* first_bb =
- (bb->block_type == kEntryBlock) ? bb : mir_graph_->GetBasicBlock(bb->predecessors->Get(0));
- DCHECK(first_bb != nullptr);
- DCHECK(first_bb->data_flow_info != nullptr);
- DCHECK(first_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
- const int32_t* first_vreg_to_ssa_map = first_bb->data_flow_info->vreg_to_ssa_map_exit;
- references->ClearAllBits();
- for (uint32_t vreg = 0, num_vregs = cu_->num_regs + cu_->num_ins; vreg != num_vregs; ++vreg) {
- int32_t sreg = first_vreg_to_ssa_map[vreg];
- if (sreg != INVALID_SREG && mir_graph_->reg_location_[sreg].ref &&
- !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[sreg])) {
- references->SetBit(vreg);
- }
- }
- // Unmark the references that are merging with a different value.
- for (size_t i = 1u, num_pred = bb->predecessors->Size(); i < num_pred; ++i) {
- BasicBlock* pred_bb = mir_graph_->GetBasicBlock(bb->predecessors->Get(i));
- DCHECK(pred_bb != nullptr);
- DCHECK(pred_bb->data_flow_info != nullptr);
- DCHECK(pred_bb->data_flow_info->vreg_to_ssa_map_exit != nullptr);
- const int32_t* pred_vreg_to_ssa_map = pred_bb->data_flow_info->vreg_to_ssa_map_exit;
- for (uint32_t vreg : references->Indexes()) {
- if (first_vreg_to_ssa_map[vreg] != pred_vreg_to_ssa_map[vreg]) {
- // NOTE: The BitVectorSet::IndexIterator will not check the pointed-to bit again,
- // so clearing the bit has no effect on the iterator.
- references->ClearBit(vreg);
- }
- }
- }
- if (bb->block_type != kEntryBlock && bb->first_mir_insn != nullptr &&
- static_cast<int>(bb->first_mir_insn->dalvikInsn.opcode) == kMirOpCheckPart2) {
- // In Mir2Lir::MethodBlockCodeGen() we have artificially moved the throwing
- // instruction to the previous block. However, the MIRGraph data used above
- // doesn't reflect that, so we still need to process that MIR insn here.
- DCHECK_EQ(bb->predecessors->Size(), 1u);
- BasicBlock* pred_bb = mir_graph_->GetBasicBlock(bb->predecessors->Get(0));
- DCHECK(pred_bb != nullptr);
- DCHECK(pred_bb->last_mir_insn != nullptr);
- UpdateReferenceVRegsLocal(nullptr, pred_bb->last_mir_insn, references);
- }
-}
-
-bool Mir2Lir::UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references) {
- DCHECK(mir == nullptr || mir->bb == prev_mir->bb);
- DCHECK(prev_mir != nullptr);
- while (prev_mir != nullptr) {
- if (prev_mir == mir) {
- return true;
- }
- const size_t num_defs = prev_mir->ssa_rep->num_defs;
- const int32_t* defs = prev_mir->ssa_rep->defs;
- if (num_defs == 1u && mir_graph_->reg_location_[defs[0]].ref &&
- !mir_graph_->IsConstantNullRef(mir_graph_->reg_location_[defs[0]])) {
- references->SetBit(mir_graph_->SRegToVReg(defs[0]));
- } else {
- for (size_t i = 0u; i != num_defs; ++i) {
- references->ClearBit(mir_graph_->SRegToVReg(defs[i]));
- }
- }
- prev_mir = prev_mir->next;
- }
- return false;
-}
-
-void Mir2Lir::UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references) {
- if (mir == nullptr) {
- // Safepoint in entry sequence.
- InitReferenceVRegs(mir_graph_->GetEntryBlock(), references);
- return;
- }
- if (mir->dalvikInsn.opcode == Instruction::RETURN_VOID ||
- mir->dalvikInsn.opcode == Instruction::RETURN ||
- mir->dalvikInsn.opcode == Instruction::RETURN_WIDE ||
- mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT ||
- mir->dalvikInsn.opcode == Instruction::RETURN_VOID_BARRIER) {
- references->ClearAllBits();
- if (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT) {
- references->SetBit(mir_graph_->SRegToVReg(mir->ssa_rep->uses[0]));
- }
- return;
- }
- if (prev_mir != nullptr && mir->bb == prev_mir->bb &&
- UpdateReferenceVRegsLocal(mir, prev_mir, references)) {
- return;
- }
- BasicBlock* bb = mir_graph_->GetBasicBlock(mir->bb);
- DCHECK(bb != nullptr);
- InitReferenceVRegs(bb, references);
- bool success = UpdateReferenceVRegsLocal(mir, bb->first_mir_insn, references);
- DCHECK(success) << "MIR @0x" << std::hex << mir->offset << " not in BB#" << std::dec << mir->bb;
-}
-
} // namespace art
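
A note on the endianness guard in the removed CreateNativeGcMap() above: on
little-endian hosts the bit vector's raw storage already has the byte layout
the GC map expects, while on big-endian or unknown hosts each byte must be
shifted out of its host word. A minimal standalone sketch of that
byte-extraction idiom, with made-up sample values (plain C++, not ART code):

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Word-backed bit-vector storage with bits 0, 2, and 40 set.
  const uint32_t raw_storage[] = {0x00000005u, 0x00000100u};
  const size_t reg_width = 6;  // bytes needed to cover the highest set bit (40)
  std::vector<uint8_t> references_buffer(reg_width);
  for (size_t i = 0; i != reg_width; ++i) {
    // Shift byte i out of its host word; correct regardless of host endianness.
    references_buffer[i] = static_cast<uint8_t>(
        raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
  }
  for (uint8_t b : references_buffer) {
    std::printf("%02x ", b);  // prints: 05 00 00 00 00 01
  }
  std::printf("\n");
  return 0;
}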
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index b7ed4906c8..d451401af0 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -1935,6 +1935,9 @@ void Mir2Lir::GenConst(RegLocation rl_dest, int value) {
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
LoadConstantNoClobber(rl_result.reg, value);
StoreValue(rl_dest, rl_result);
+ if (value == 0) {
+ Workaround7250540(rl_dest, rl_result.reg);
+ }
}
void Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 8d0c9f4a88..e5798fdc0b 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -36,6 +36,46 @@ LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) {
}
/*
+ * Temporary workaround for Issue 7250540. If we're loading a constant zero into a
+ * promoted floating point register, also copy a zero into the int/ref identity of
+ * that sreg.
+ */
+void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
+ if (rl_dest.fp) {
+ int pmap_index = SRegToPMap(rl_dest.s_reg_low);
+ if (promotion_map_[pmap_index].fp_location == kLocPhysReg) {
+ // Now, determine if this vreg is ever used as a reference. If not, we're done.
+ bool used_as_reference = false;
+ int base_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
+ for (int i = 0; !used_as_reference && (i < mir_graph_->GetNumSSARegs()); i++) {
+ if (mir_graph_->SRegToVReg(mir_graph_->reg_location_[i].s_reg_low) == base_vreg) {
+ used_as_reference |= mir_graph_->reg_location_[i].ref;
+ }
+ }
+ if (!used_as_reference) {
+ return;
+ }
+ RegStorage temp_reg = zero_reg;
+ if (!temp_reg.Valid()) {
+ temp_reg = AllocTemp();
+ LoadConstant(temp_reg, 0);
+ }
+ if (promotion_map_[pmap_index].core_location == kLocPhysReg) {
+ // Promoted - just copy in a zero
+ OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg);
+ } else {
+ // Lives in the frame, need to store.
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_dest.s_reg_low), temp_reg, k32, kNotVolatile);
+ }
+ if (!zero_reg.Valid()) {
+ FreeTemp(temp_reg);
+ }
+ }
+ }
+}
+
+/*
* Load a Dalvik register into a physical register. Take care when
* using this routine, as it doesn't perform any bookkeeping regarding
* register liveness. That is the responsibility of the caller.
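
The restored Workaround7250540() above leans on a bit-level fact: the float
constant 0.0f and a null reference share the all-zeros encoding, so mirroring
the zero into the vreg's int/ref identity (promoted core register or frame
slot) can never plant a bogus GC root. It also explains why the GenConst()
change in gen_common.cc passes rl_result.reg: a register already holding zero
can be reused instead of allocating a temp. A hypothetical sketch of the
bit-pattern argument (plain C++, not ART code):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t slot = 0xDEADBEEFu;              // stale bits in the core/ref view of a vreg
  const float zero = 0.0f;                  // constant loaded into the promoted FP reg
  std::memcpy(&slot, &zero, sizeof(slot));  // mirror the zero into the other view
  assert(slot == 0u);                       // the reference view now reads as null
  return 0;
}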
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index dcfc8bca69..6e0fe02af5 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -319,7 +319,6 @@ bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) {
bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
DCHECK(special.flags & kInlineSpecial);
current_dalvik_offset_ = mir->offset;
- DCHECK(current_mir_ == nullptr); // Safepoints attributed to prologue.
MIR* return_mir = nullptr;
bool successful = false;
@@ -1164,7 +1163,6 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
}
current_dalvik_offset_ = mir->offset;
- current_mir_ = mir;
int opcode = mir->dalvikInsn.opcode;
GenPrintLabel(mir);
@@ -1267,7 +1265,6 @@ void Mir2Lir::MethodMIR2LIR() {
LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel(int opcode) {
m2l_->SetCurrentDexPc(current_dex_pc_);
- m2l_->current_mir_ = current_mir_;
LIR* target = m2l_->NewLIR0(opcode);
fromfast_->target = target;
return target;
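
For context on the LIRSlowPath changes here and in mir_to_lir.h below: a slow
path snapshots state when it is constructed and re-installs it in
GenerateTargetLabel() before emitting its out-of-line code, so that code is
attributed to the right dex pc; the revert merely drops current_mir_ from that
snapshot. A toy sketch of the capture/restore pattern (hypothetical names, not
ART code):

#include <cstdint>

struct Codegen {
  uint32_t current_dex_pc = 0;  // stands in for Mir2Lir::current_dalvik_offset_
};

struct SlowPath {
  Codegen* cg;
  uint32_t captured_dex_pc;
  explicit SlowPath(Codegen* c) : cg(c), captured_dex_pc(c->current_dex_pc) {}
  void Emit() {
    cg->current_dex_pc = captured_dex_pc;  // restore before emitting slow-path code
    // ... emit out-of-line code here ...
  }
};

int main() {
  Codegen cg;
  cg.current_dex_pc = 0x20;  // main-line code generation at dex pc 0x20
  SlowPath sp(&cg);          // slow path captures 0x20
  cg.current_dex_pc = 0x40;  // main line moves on
  sp.Emit();                 // slow-path emission restores 0x20
  return cg.current_dex_pc == 0x20 ? 0 : 1;
}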
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index eadda85687..bfd7860684 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -139,7 +139,6 @@ typedef uint32_t CodeOffset; // Native code offset in bytes.
#endif
struct BasicBlock;
-class BitVector;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
@@ -523,8 +522,7 @@ class Mir2Lir : public Backend {
public:
LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
LIR* cont = nullptr) :
- m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), current_mir_(m2l->current_mir_),
- fromfast_(fromfast), cont_(cont) {
+ m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
m2l->StartSlowPath(this);
}
virtual ~LIRSlowPath() {}
@@ -548,7 +546,6 @@ class Mir2Lir : public Backend {
Mir2Lir* const m2l_;
CompilationUnit* const cu_;
const DexOffset current_dex_pc_;
- MIR* current_mir_;
LIR* const fromfast_;
LIR* const cont_;
};
@@ -706,7 +703,6 @@ class Mir2Lir : public Backend {
bool VerifyCatchEntries();
void CreateMappingTables();
void CreateNativeGcMap();
- void CreateNativeGcMapWithoutRegisterPromotion();
int AssignLiteralOffset(CodeOffset offset);
int AssignSwitchTablesOffset(CodeOffset offset);
int AssignFillArrayDataOffset(CodeOffset offset);
@@ -1679,16 +1675,6 @@ class Mir2Lir : public Backend {
// See CheckRegLocationImpl.
void CheckRegLocation(RegLocation rl) const;
- // Find the references at the beginning of a basic block (for generating GC maps).
- void InitReferenceVRegs(BasicBlock* bb, BitVector* references);
-
- // Update references from prev_mir to mir in the same BB. If mir is null or before
- // prev_mir, report failure (return false) and update references to the end of the BB.
- bool UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references);
-
- // Update references from prev_mir to mir.
- void UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references);
-
public:
// TODO: add accessors for these.
LIR* literal_list_; // Constants.
@@ -1706,6 +1692,7 @@ class Mir2Lir : public Backend {
GrowableArray<RegisterInfo*> tempreg_info_;
GrowableArray<RegisterInfo*> reginfo_map_;
GrowableArray<void*> pointer_storage_;
+ CodeOffset current_code_offset_; // Working byte offset of machine instructions.
CodeOffset data_offset_; // starting offset of literal pool.
size_t total_size_; // header + code size.
LIR* block_label_list_;
@@ -1720,7 +1707,6 @@ class Mir2Lir : public Backend {
* The low-level LIR creation utilities will pull it from here. Rework this.
*/
DexOffset current_dalvik_offset_;
- MIR* current_mir_;
size_t estimated_native_code_size_; // Just an estimate; used to reserve code_buffer_ size.
RegisterPool* reg_pool_;
/*
@@ -1755,9 +1741,6 @@ class Mir2Lir : public Backend {
// (i.e. 8 bytes on 32-bit arch, 16 bytes on 64-bit arch) and we use ResourceMaskCache
// to deduplicate the masks.
ResourceMaskCache mask_cache_;
-
- // Record the MIR that generated a given safepoint (nullptr for prologue safepoints).
- ArenaVector<std::pair<LIR*, MIR*>> safepoints_;
}; // Class Mir2Lir
} // namespace art