Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/mir_optimization.cc    3
-rw-r--r--  compiler/dex/quick/codegen_util.cc  52
-rw-r--r--  compiler/dex/quick/mir_to_lir.h     3
3 files changed, 34 insertions, 24 deletions
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 266b7c3064..c85c3b6f21 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -671,6 +671,9 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
int dead_true_def = if_true->ssa_rep->defs[0];
raw_use_counts_[dead_true_def] = use_counts_[dead_true_def] = 0;
+ // Update ending vreg->sreg map for GC maps generation.
+ int def_vreg = SRegToVReg(mir->ssa_rep->defs[0]);
+ bb->data_flow_info->vreg_to_ssa_map_exit[def_vreg] = mir->ssa_rep->defs[0];
// We want to remove ft and tk and link bb directly to ft_ft. First, we need
// to update all Phi inputs correctly with UpdatePredecessor(ft->id, bb->id)
// since the live_def above comes from ft->first_mir_insn (if_false).
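Note on the mir_optimization.cc hunk: when the select optimization folds the
if/else diamond into a single block, the definition produced by the select MIR
becomes the live definition of its Dalvik vreg at the end of the block, and the
block's ending vreg->sreg map has to say so, or GC map generation would consult
a stale SSA name. Below is a minimal standalone sketch of that bookkeeping with
fabricated stand-in types; the real code lives in MIRGraph and
BasicBlockDataFlowInfo, and sreg_to_vreg here fakes MIRGraph::SRegToVReg():

  #include <cassert>
  #include <vector>

  // Stand-in for MIR::ssa_rep: the SSA regs a MIR defines.
  struct SsaRep {
    std::vector<int> defs;
  };

  int main() {
    // Assumed SSA reg -> Dalvik vreg mapping: 6 SSA regs over 3 vregs.
    std::vector<int> sreg_to_vreg = {0, 1, 2, 0, 1, 2};

    // Ending vreg->sreg map of the block, as in
    // BasicBlockDataFlowInfo::vreg_to_ssa_map_exit.
    std::vector<int> vreg_to_ssa_map_exit = {0, 1, 2};

    // The select MIR defines SSA reg 4, a fresh version of vreg 1.
    SsaRep select_def{{4}};

    // The bookkeeping the patch adds: repoint the exit map at the
    // surviving definition.
    int def_vreg = sreg_to_vreg[select_def.defs[0]];
    vreg_to_ssa_map_exit[def_vreg] = select_def.defs[0];

    assert(vreg_to_ssa_map_exit[1] == 4);  // GC map now sees the live def
    return 0;
  }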
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 4e7919b6d2..bd479bef5b 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -793,33 +793,43 @@ void Mir2Lir::CreateNativeGcMap() {
prev_mir = mir;
}
+#if defined(BYTE_ORDER) && (BYTE_ORDER == LITTLE_ENDIAN)
+ static constexpr bool kLittleEndian = true;
+#else
+ static constexpr bool kLittleEndian = false;
+#endif
+
// Build the GC map.
uint32_t reg_width = static_cast<uint32_t>((max_ref_vreg + 8) / 8);
GcMapBuilder native_gc_map_builder(&native_gc_map_,
safepoints_.size(),
max_native_offset, reg_width);
-#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN)
- ArenaVector<uint8_t> references_buffer(arena_->Adapter());
- references_buffer.resize(reg_width);
-#endif
- for (const auto& entry : safepoints_) {
- uint32_t native_offset = entry.first->offset;
- MIR* mir = entry.second;
- UpdateReferenceVRegs(mir, prev_mir, references);
-#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN)
- // Big-endian or unknown endianness, manually translate the bit vector data.
- const auto* raw_storage = references->GetRawStorage();
- for (size_t i = 0; i != reg_width; ++i) {
- references_buffer[i] = static_cast<uint8_t>(
- raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
+ if (kLittleEndian) {
+ for (const auto& entry : safepoints_) {
+ uint32_t native_offset = entry.first->offset;
+ MIR* mir = entry.second;
+ UpdateReferenceVRegs(mir, prev_mir, references);
+ // For little-endian, the bytes comprising the bit vector's raw storage are what we need.
+ native_gc_map_builder.AddEntry(native_offset,
+ reinterpret_cast<const uint8_t*>(references->GetRawStorage()));
+ prev_mir = mir;
+ }
+ } else {
+ ArenaVector<uint8_t> references_buffer(arena_->Adapter());
+ references_buffer.resize(reg_width);
+ for (const auto& entry : safepoints_) {
+ uint32_t native_offset = entry.first->offset;
+ MIR* mir = entry.second;
+ UpdateReferenceVRegs(mir, prev_mir, references);
+ // Big-endian or unknown endianness, manually translate the bit vector data.
+ const auto* raw_storage = references->GetRawStorage();
+ for (size_t i = 0; i != reg_width; ++i) {
+ references_buffer[i] = static_cast<uint8_t>(
+ raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
+ }
+ native_gc_map_builder.AddEntry(native_offset, &references_buffer[0]);
+ prev_mir = mir;
}
- native_gc_map_builder.AddEntry(native_offset, &references_buffer[0]);
-#else
- // For little-endian, the bytes comprising the bit vector's raw storage are what we need.
- native_gc_map_builder.AddEntry(native_offset,
- reinterpret_cast<const uint8_t*>(references->GetRawStorage()));
-#endif
- prev_mir = mir;
}
}
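Note on the codegen_util.cc hunk: the preprocessor conditionals inside the
safepoint loop are replaced by a single static constexpr flag and an ordinary
branch, so both paths now compile on every host and the loop body is no longer
spliced together by #if/#else. The big-endian fallback works because the bit
vector stores bits low-to-high in host-order words, and word >> (8 * k) always
yields the k-th least significant byte no matter how the word is laid out in
memory. Here is a standalone sketch of that translation, using the same shift
arithmetic as the patch on fabricated input data:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  int main() {
    // Pretend raw bit-vector storage: bits 0..31 in words[0], etc.
    const uint32_t raw_storage[] = {0x00000201u, 0x00000080u};
    const size_t reg_width = 6;  // bytes per GC map entry

    std::vector<uint8_t> buffer(reg_width);
    for (size_t i = 0; i != reg_width; ++i) {
      // Pick word i/4, then byte i%4 of it; identical arithmetic to
      // the references_buffer loop in the patch.
      buffer[i] = static_cast<uint8_t>(
          raw_storage[i / sizeof(raw_storage[0])] >>
          (8u * (i % sizeof(raw_storage[0]))));
    }
    for (uint8_t b : buffer) {
      printf("%02x ", b);  // prints: 01 02 00 00 80 00
    }
    printf("\n");
    return 0;
  }

On a little-endian host this loop's output is byte-identical to reading
reinterpret_cast<const uint8_t*>(raw_storage) directly, which is why the
kLittleEndian branch can hand the raw storage pointer straight to AddEntry()
and skip the copy.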
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 236bad7451..cca4e5a30a 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1465,9 +1465,6 @@ class Mir2Lir {
virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
- // Temp workaround
- void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);
-
virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;
// Queries for backend support for vectors