Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.h       2
-rw-r--r--  compiler/optimizing/graph_visualizer.cc    2
-rw-r--r--  compiler/optimizing/nodes.h                3
-rw-r--r--  compiler/optimizing/register_allocator.cc  60
4 files changed, 35 insertions, 32 deletions
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 0a3698946e..acce5b3359 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -531,6 +531,8 @@ class CodeGenerator {
  template <typename LabelType>
  LabelType* CommonInitializeLabels() {
+    // We use raw array allocations instead of ArenaVector<> because Labels are
+    // non-constructible and non-movable and as such cannot be held in a vector.
    size_t size = GetGraph()->GetBlocks().size();
    LabelType* labels = GetGraph()->GetArena()->AllocArray<LabelType>(size,
                                                                      kArenaAllocCodeGenerator);
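
The new comment records a C++ constraint: the code generator's Label objects can be neither copied nor moved, so they cannot live in an ArenaVector<>, and CommonInitializeLabels() instead hands out a raw arena array. The following standalone sketch is not ART code (FakeLabel and BumpArena are hypothetical stand-ins for an assembler Label and the ArenaAllocator); it only illustrates the pattern of allocating raw storage and constructing each element in place:

// Standalone sketch, not ART code: FakeLabel and BumpArena are hypothetical
// stand-ins for an assembler Label and the ArenaAllocator.
#include <cstddef>
#include <cstdio>
#include <new>

class FakeLabel {
 public:
  FakeLabel() = default;
  FakeLabel(const FakeLabel&) = delete;             // non-copyable
  FakeLabel& operator=(const FakeLabel&) = delete;
  FakeLabel(FakeLabel&&) = delete;                  // non-movable
  FakeLabel& operator=(FakeLabel&&) = delete;
  void Bind(size_t pc) { pc_ = pc; bound_ = true; }
  bool IsBound() const { return bound_; }
 private:
  size_t pc_ = 0;
  bool bound_ = false;
};

class BumpArena {
 public:
  // Allocates raw storage and default-constructs each element in place.
  template <typename T>
  T* AllocArray(size_t n) {
    T* array = static_cast<T*>(::operator new(n * sizeof(T)));
    for (size_t i = 0; i != n; ++i) {
      new (array + i) T();
    }
    return array;  // an arena would release this wholesale; omitted here
  }
};

int main() {
  BumpArena arena;
  constexpr size_t kNumBlocks = 4;
  // A std::vector<FakeLabel> could be constructed at a fixed size, but anything
  // that relocates elements (push_back, resize, reserve) needs the deleted move
  // constructor and fails to compile; the raw array never relocates its elements.
  FakeLabel* labels = arena.AllocArray<FakeLabel>(kNumBlocks);
  labels[2].Bind(0x40);
  std::printf("label 2 bound: %d\n", labels[2].IsBound() ? 1 : 0);
  return 0;
}
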
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 208260a2ad..4111671a9b 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -362,6 +362,8 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
  void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
    StartAttributeStream("gen_clinit_check") << std::boolalpha
        << load_class->MustGenerateClinitCheck() << std::noboolalpha;
+    StartAttributeStream("needs_access_check") << std::boolalpha
+        << load_class->NeedsAccessCheck() << std::noboolalpha;
  }

  void VisitCheckCast(HCheckCast* check_cast) OVERRIDE {
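
The new attribute is bracketed with std::boolalpha and std::noboolalpha, like the existing gen_clinit_check one, so the flag is printed as true/false in the visualizer output and the stream's formatting state is restored afterwards. A quick standalone illustration of that bracketing:

#include <iostream>

int main() {
  bool needs_access_check = true;
  std::cout << "plain:     " << needs_access_check << '\n';  // prints 1
  std::cout << "boolalpha: " << std::boolalpha << needs_access_check
            << std::noboolalpha << '\n';                     // prints true
  return 0;
}
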
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index bba5a9c350..615012eb3a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4510,7 +4510,8 @@ class HLoadClass : public HExpression<1> {
  bool CanBeMoved() const OVERRIDE { return true; }

  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
-    return other->AsLoadClass()->type_index_ == type_index_;
+    return other->AsLoadClass()->type_index_ == type_index_ &&
+        other->AsLoadClass()->needs_access_check_ == needs_access_check_;
  }

  size_t ComputeHashCode() const OVERRIDE { return type_index_; }
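
With the stricter InstructionDataEquals(), two HLoadClass instructions that differ only in needs_access_check_ no longer compare equal, so equality-based optimizations cannot merge a load that requires an access check with one that does not. ComputeHashCode() can keep returning only type_index_, because a hash merely has to agree for instructions that are equal; same-hash candidates are still separated by the equality check. A minimal sketch of that contract, using a hypothetical FakeLoadClass in place of HLoadClass:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in holding just the two fields compared above.
struct FakeLoadClass {
  uint16_t type_index;
  bool needs_access_check;

  bool DataEquals(const FakeLoadClass& other) const {
    return other.type_index == type_index &&
           other.needs_access_check == needs_access_check;
  }

  // Hashing only the type index is still valid: equal instructions get equal
  // hashes, and same-hash instructions that differ in the flag are separated
  // by DataEquals().
  size_t HashCode() const { return type_index; }
};

int main() {
  FakeLoadClass checked{42, true};
  FakeLoadClass unchecked{42, false};
  assert(checked.HashCode() == unchecked.HashCode());  // sharing a bucket is allowed
  assert(!checked.DataEquals(unchecked));              // but they are no longer "equal"
  return 0;
}
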
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 4d43d1df9c..6fc77721e7 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -617,42 +617,40 @@ void RegisterAllocator::LinearScan() {
    // (2) Remove currently active intervals that are dead at this position.
    //     Move active intervals that have a lifetime hole at this position
    //     to inactive.
-    // Note: Copy elements we keep to the beginning, just like
-    //     v.erase(std::remove(v.begin(), v.end(), value), v.end());
-    auto active_kept_end = active_.begin();
-    for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
-      LiveInterval* interval = *it;
-      if (interval->IsDeadAt(position)) {
-        handled_.push_back(interval);
-      } else if (!interval->Covers(position)) {
-        inactive_.push_back(interval);
-      } else {
-        *active_kept_end++ = interval;  // Keep this interval.
-      }
-    }
-    // We have copied what we want to keep to [active_.begin(), active_kept_end),
-    // the rest of the data in active_ is junk - drop it.
+    auto active_kept_end = std::remove_if(
+        active_.begin(),
+        active_.end(),
+        [this, position](LiveInterval* interval) {
+          if (interval->IsDeadAt(position)) {
+            handled_.push_back(interval);
+            return true;
+          } else if (!interval->Covers(position)) {
+            inactive_.push_back(interval);
+            return true;
+          } else {
+            return false;  // Keep this interval.
+          }
+        });
    active_.erase(active_kept_end, active_.end());

    // (3) Remove currently inactive intervals that are dead at this position.
    //     Move inactive intervals that cover this position to active.
-    // Note: Copy elements we keep to the beginning, just like
-    //     v.erase(std::remove(v.begin(), v.begin() + num, value), v.begin() + num);
-    auto inactive_kept_end = inactive_.begin();
    auto inactive_to_handle_end = inactive_.begin() + inactive_intervals_to_handle;
-    for (auto it = inactive_.begin(); it != inactive_to_handle_end; ++it) {
-      LiveInterval* interval = *it;
-      DCHECK(interval->GetStart() < position || interval->IsFixed());
-      if (interval->IsDeadAt(position)) {
-        handled_.push_back(interval);
-      } else if (interval->Covers(position)) {
-        active_.push_back(interval);
-      } else {
-        *inactive_kept_end++ = interval;  // Keep this interval.
-      }
-    }
-    // We have copied what we want to keep to [inactive_.begin(), inactive_kept_end),
-    // the rest of the data in the processed interval is junk - drop it.
+    auto inactive_kept_end = std::remove_if(
+        inactive_.begin(),
+        inactive_to_handle_end,
+        [this, position](LiveInterval* interval) {
+          DCHECK(interval->GetStart() < position || interval->IsFixed());
+          if (interval->IsDeadAt(position)) {
+            handled_.push_back(interval);
+            return true;
+          } else if (interval->Covers(position)) {
+            active_.push_back(interval);
+            return true;
+          } else {
+            return false;  // Keep this interval.
+          }
+        });
    inactive_.erase(inactive_kept_end, inactive_to_handle_end);

    if (current->IsSlowPathSafepoint()) {
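
Both hand-rolled compaction loops are replaced with the standard erase/remove_if idiom: the lambda decides whether an interval leaves the worklist and, as a side effect, files it into handled_, inactive_ or active_ before returning true. Since std::remove_if applies its predicate exactly once per element, every interval is still filed exactly once, matching the behaviour of the old loops. Below is a standalone sketch of the same pattern with plain ints standing in for LiveInterval* (the cut-off values are made up for illustration):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> active   = {3, 10, 25, 40, 55};
  std::vector<int> handled;    // receives elements removed as "dead"
  std::vector<int> inactive;   // receives elements removed as "not covering"
  const int position = 30;

  // Partition-and-erase in the style of the register allocator: the predicate
  // decides removal and, for removed elements, records where they went.
  auto kept_end = std::remove_if(
      active.begin(),
      active.end(),
      [&](int interval_end) {
        if (interval_end < position) {               // "dead" before this position
          handled.push_back(interval_end);
          return true;
        } else if (interval_end > position + 20) {   // pretend lifetime hole
          inactive.push_back(interval_end);
          return true;
        }
        return false;                                // keep this element
      });
  active.erase(kept_end, active.end());

  // Prints: active: 1, handled: 3, inactive: 1
  std::printf("active: %zu, handled: %zu, inactive: %zu\n",
              active.size(), handled.size(), inactive.size());
  return 0;
}
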