-rw-r--r--  compiler/dex/local_value_numbering.cc       609
-rw-r--r--  compiler/dex/local_value_numbering.h         201
-rw-r--r--  compiler/dex/local_value_numbering_test.cc   299
-rw-r--r--  compiler/dex/mir_optimization.cc               7
-rw-r--r--  runtime/safe_map.h                             3
5 files changed, 845 insertions, 274 deletions
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index c0068b2331..62594963fc 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -21,8 +21,48 @@
namespace art {
-uint16_t LocalValueNumbering::GetFieldId(const DexFile* dex_file, uint16_t field_idx) {
- FieldReference key = { dex_file, field_idx };
+namespace { // anonymous namespace
+
+// Operations used for value map keys instead of actual opcode.
+static constexpr uint16_t kInvokeMemoryVersionBumpOp = Instruction::INVOKE_DIRECT;
+static constexpr uint16_t kUnresolvedSFieldOp = Instruction::SPUT;
+static constexpr uint16_t kResolvedSFieldOp = Instruction::SGET;
+static constexpr uint16_t kUnresolvedIFieldOp = Instruction::IPUT;
+static constexpr uint16_t kNonAliasingIFieldOp = Instruction::IGET;
+static constexpr uint16_t kAliasingIFieldOp = Instruction::IGET_WIDE;
+static constexpr uint16_t kAliasingIFieldStartVersionOp = Instruction::IGET_WIDE;
+static constexpr uint16_t kAliasingIFieldBumpVersionOp = Instruction::IGET_OBJECT;
+static constexpr uint16_t kArrayAccessLocOp = Instruction::APUT;
+static constexpr uint16_t kNonAliasingArrayOp = Instruction::AGET;
+static constexpr uint16_t kNonAliasingArrayStartVersionOp = Instruction::AGET_WIDE;
+static constexpr uint16_t kAliasingArrayOp = Instruction::AGET_OBJECT;
+static constexpr uint16_t kAliasingArrayMemoryVersionOp = Instruction::AGET_BOOLEAN;
+static constexpr uint16_t kAliasingArrayBumpVersionOp = Instruction::AGET_BYTE;
+
+} // anonymous namespace
+
+LocalValueNumbering::LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
+ : cu_(cu),
+ last_value_(0u),
+ sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ value_map_(std::less<uint64_t>(), allocator->Adapter()),
+ global_memory_version_(0u),
+ aliasing_ifield_version_map_(std::less<uint16_t>(), allocator->Adapter()),
+ non_aliasing_array_version_map_(std::less<uint16_t>(), allocator->Adapter()),
+ field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
+ non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
+ non_aliasing_ifields_(NonAliasingIFieldKeyComparator(), allocator->Adapter()),
+ escaped_array_refs_(EscapedArrayKeyComparator(), allocator->Adapter()),
+      range_checked_(RangeCheckKeyComparator(), allocator->Adapter()),
+ null_checked_(std::less<uint16_t>(), allocator->Adapter()) {
+ std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
+ std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
+ std::fill_n(aliasing_array_version_, kFieldTypeCount, 0u);
+}
+
+uint16_t LocalValueNumbering::GetFieldId(const MirFieldInfo& field_info) {
+ FieldReference key = { field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex() };
auto it = field_index_map_.find(key);
if (it != field_index_map_.end()) {
return it->second;
@@ -32,62 +72,6 @@ uint16_t LocalValueNumbering::GetFieldId(const DexFile* dex_file, uint16_t field
return id;
}
-void LocalValueNumbering::AdvanceGlobalMemory() {
- // See AdvanceMemoryVersion() for explanation.
- global_memory_version_ = next_memory_version_;
- ++next_memory_version_;
-}
-
-uint16_t LocalValueNumbering::GetMemoryVersion(uint16_t base, uint16_t field, uint16_t type) {
- // See AdvanceMemoryVersion() for explanation.
- MemoryVersionKey key = { base, field, type };
- MemoryVersionMap::iterator it = memory_version_map_.find(key);
- uint16_t memory_version = (it != memory_version_map_.end()) ? it->second : 0u;
- if (base != NO_VALUE && non_aliasing_refs_.find(base) == non_aliasing_refs_.end()) {
- // Check modifications by potentially aliased access.
- MemoryVersionKey aliased_access_key = { NO_VALUE, field, type };
- auto aa_it = memory_version_map_.find(aliased_access_key);
- if (aa_it != memory_version_map_.end() && aa_it->second > memory_version) {
- memory_version = aa_it->second;
- }
- memory_version = std::max(memory_version, global_memory_version_);
- } else if (base != NO_VALUE) {
- // Ignore global_memory_version_ for access via unique references.
- } else {
- memory_version = std::max(memory_version, global_memory_version_);
- }
- return memory_version;
-};
-
-uint16_t LocalValueNumbering::AdvanceMemoryVersion(uint16_t base, uint16_t field, uint16_t type) {
- // When we read the same value from memory, we want to assign the same value name to it.
- // However, we need to be careful not to assign the same value name if the memory location
- // may have been written to between the reads. To avoid that we do "memory versioning".
- //
- // For each write to a memory location (instance field, static field, array element) we assign
- // a new memory version number to the location identified by the value name of the base register,
- // the field id and type, or "{ base, field, type }". For static fields the "base" is NO_VALUE
- // since they are not accessed via a reference. For arrays the "field" is NO_VALUE since they
- // don't have a field id.
- //
- // To account for the possibility of aliased access to the same memory location via different
- // "base", we also store the memory version number with the key "{ NO_VALUE, field, type }"
- // if "base" is an aliasing reference and check it in GetMemoryVersion() on reads via
- // aliasing references. A global memory version is set for method calls as a method can
- // potentially write to any memory location accessed via an aliasing reference.
-
- uint16_t result = next_memory_version_;
- ++next_memory_version_;
- MemoryVersionKey key = { base, field, type };
- memory_version_map_.Overwrite(key, result);
- if (base != NO_VALUE && non_aliasing_refs_.find(base) == non_aliasing_refs_.end()) {
- // Advance memory version for aliased access.
- MemoryVersionKey aliased_access_key = { NO_VALUE, field, type };
- memory_version_map_.Overwrite(aliased_access_key, result);
- }
- return result;
-};
-
uint16_t LocalValueNumbering::MarkNonAliasingNonNull(MIR* mir) {
uint16_t res = GetOperandValue(mir->ssa_rep->defs[0]);
SetOperandValue(mir->ssa_rep->defs[0], res);
@@ -97,43 +81,332 @@ uint16_t LocalValueNumbering::MarkNonAliasingNonNull(MIR* mir) {
return res;
}
-void LocalValueNumbering::MakeArgsAliasing(MIR* mir) {
- for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
- uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
- non_aliasing_refs_.erase(reg);
+bool LocalValueNumbering::IsNonAliasing(uint16_t reg) {
+ return non_aliasing_refs_.find(reg) != non_aliasing_refs_.end();
+}
+
+bool LocalValueNumbering::IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) {
+ if (IsNonAliasing(reg)) {
+ return true;
+ }
+ NonAliasingIFieldKey key = { reg, field_id, type };
+ return non_aliasing_ifields_.count(key) != 0u;
+}
+
+bool LocalValueNumbering::IsNonAliasingArray(uint16_t reg, uint16_t type) {
+ if (IsNonAliasing(reg)) {
+ return true;
}
+ EscapedArrayKey key = { reg, type };
+ return escaped_array_refs_.count(key) != 0u;
}
+
void LocalValueNumbering::HandleNullCheck(MIR* mir, uint16_t reg) {
- if (null_checked_.find(reg) != null_checked_.end()) {
- if (cu_->verbose) {
- LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+ auto lb = null_checked_.lower_bound(reg);
+ if (lb != null_checked_.end() && *lb == reg) {
+ if (LIKELY(Good())) {
+ if (cu_->verbose) {
+ LOG(INFO) << "Removing null check for 0x" << std::hex << mir->offset;
+ }
+ mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
}
- mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
} else {
- null_checked_.insert(reg);
+ null_checked_.insert(lb, reg);
}
}
void LocalValueNumbering::HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index) {
- if (ValueExists(ARRAY_REF, array, index, NO_VALUE)) {
- if (cu_->verbose) {
- LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+ RangeCheckKey key = { array, index };
+ auto lb = range_checked_.lower_bound(key);
+ if (lb != range_checked_.end() && !RangeCheckKeyComparator()(key, *lb)) {
+ if (LIKELY(Good())) {
+ if (cu_->verbose) {
+ LOG(INFO) << "Removing range check for 0x" << std::hex << mir->offset;
+ }
+ mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
}
- mir->optimization_flags |= MIR_IGNORE_RANGE_CHECK;
+ } else {
+ // Mark range check completed.
+ range_checked_.insert(lb, key);
}
- // Use side effect to note range check completed.
- (void)LookupValue(ARRAY_REF, array, index, NO_VALUE);
}
void LocalValueNumbering::HandlePutObject(MIR* mir) {
// If we're storing a non-aliasing reference, stop tracking it as non-aliasing now.
uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
- non_aliasing_refs_.erase(base);
+ HandleEscapingRef(base);
+}
+
+void LocalValueNumbering::HandleEscapingRef(uint16_t base) {
+ auto it = non_aliasing_refs_.find(base);
+ if (it != non_aliasing_refs_.end()) {
+ uint64_t iget_key = BuildKey(Instruction::IGET, base, 0u, 0u);
+ for (auto iget_it = value_map_.lower_bound(iget_key), iget_end = value_map_.end();
+ iget_it != iget_end && EqualOpAndOperand1(iget_it->first, iget_key); ++iget_it) {
+ uint16_t field_id = ExtractOperand2(iget_it->first);
+ uint16_t type = ExtractModifier(iget_it->first);
+ NonAliasingIFieldKey key = { base, field_id, type };
+ non_aliasing_ifields_.insert(key);
+ }
+ uint64_t aget_key = BuildKey(kNonAliasingArrayStartVersionOp, base, 0u, 0u);
+ auto aget_it = value_map_.lower_bound(aget_key);
+ if (aget_it != value_map_.end() && EqualOpAndOperand1(aget_key, aget_it->first)) {
+ DCHECK_EQ(ExtractOperand2(aget_it->first), kNoValue);
+ uint16_t type = ExtractModifier(aget_it->first);
+ EscapedArrayKey key = { base, type };
+ escaped_array_refs_.insert(key);
+ }
+ non_aliasing_refs_.erase(it);
+ }
+}
+
+uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
+ uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
+ HandleNullCheck(mir, array);
+ uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
+ HandleRangeCheck(mir, array, index);
+ uint16_t type = opcode - Instruction::AGET;
+ // Establish value number for loaded register.
+ uint16_t res;
+ if (IsNonAliasingArray(array, type)) {
+ // Get the start version that accounts for aliasing within the array (different index names).
+ uint16_t start_version = LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, type);
+ // Find the current version from the non_aliasing_array_version_map_.
+ uint16_t memory_version = start_version;
+ auto it = non_aliasing_array_version_map_.find(start_version);
+ if (it != non_aliasing_array_version_map_.end()) {
+ memory_version = it->second;
+ } else {
+ // Just use the start_version.
+ }
+ res = LookupValue(kNonAliasingArrayOp, array, index, memory_version);
+ } else {
+ // Get the memory version of aliased array accesses of this type.
+ uint16_t memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
+ aliasing_array_version_[type], kNoValue);
+ res = LookupValue(kAliasingArrayOp, array, index, memory_version);
+ }
+ if (opcode == Instruction::AGET_WIDE) {
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ } else {
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ return res;
+}
+
+void LocalValueNumbering::HandleAPut(MIR* mir, uint16_t opcode) {
+ int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
+ int index_idx = array_idx + 1;
+ uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
+ HandleNullCheck(mir, array);
+ uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
+ HandleRangeCheck(mir, array, index);
+
+ uint16_t type = opcode - Instruction::APUT;
+ uint16_t value = (opcode == Instruction::APUT_WIDE)
+ ? GetOperandValueWide(mir->ssa_rep->uses[0])
+ : GetOperandValue(mir->ssa_rep->uses[0]);
+ if (IsNonAliasing(array)) {
+ // Get the start version that accounts for aliasing within the array (different index values).
+ uint16_t start_version = LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, type);
+ auto it = non_aliasing_array_version_map_.find(start_version);
+ uint16_t memory_version = start_version;
+ if (it != non_aliasing_array_version_map_.end()) {
+ memory_version = it->second;
+ }
+ // We need to take 4 values (array, index, memory_version, value) into account for bumping
+ // the memory version but the key can take only 3. Merge array and index into a location.
+ uint16_t array_access_location = LookupValue(kArrayAccessLocOp, array, index, kNoValue);
+ // Bump the version, adding to the chain.
+ memory_version = LookupValue(kAliasingArrayBumpVersionOp, memory_version,
+ array_access_location, value);
+ non_aliasing_array_version_map_.Overwrite(start_version, memory_version);
+ StoreValue(kNonAliasingArrayOp, array, index, memory_version, value);
+ } else {
+ // Get the memory version based on global_memory_version_ and aliasing_array_version_[type].
+ uint16_t memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
+ aliasing_array_version_[type], kNoValue);
+ if (HasValue(kAliasingArrayOp, array, index, memory_version, value)) {
+ // This APUT can be eliminated, it stores the same value that's already in the field.
+ // TODO: Eliminate the APUT.
+ return;
+ }
+ // We need to take 4 values (array, index, memory_version, value) into account for bumping
+ // the memory version but the key can take only 3. Merge array and index into a location.
+ uint16_t array_access_location = LookupValue(kArrayAccessLocOp, array, index, kNoValue);
+ // Bump the version, adding to the chain.
+ uint16_t bumped_version = LookupValue(kAliasingArrayBumpVersionOp, memory_version,
+ array_access_location, value);
+ aliasing_array_version_[type] = bumped_version;
+ memory_version = LookupValue(kAliasingArrayMemoryVersionOp, global_memory_version_,
+ bumped_version, kNoValue);
+ StoreValue(kAliasingArrayOp, array, index, memory_version, value);
+
+ // Clear escaped array refs for this type.
+    EscapedArrayKey array_key = { 0u, type };  // Lowest possible key of this type.
+ auto it = escaped_array_refs_.lower_bound(array_key), end = escaped_array_refs_.end();
+ while (it != end && it->type == type) {
+ it = escaped_array_refs_.erase(it);
+ }
+ }
+}
+
+uint16_t LocalValueNumbering::HandleIGet(MIR* mir, uint16_t opcode) {
+ uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
+ HandleNullCheck(mir, base);
+ const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+ uint16_t res;
+ if (!field_info.IsResolved() || field_info.IsVolatile()) {
+ // Volatile fields always get a new memory version; field id is irrelevant.
+ // Unresolved fields may be volatile, so handle them as such to be safe.
+ // Use result s_reg - will be unique.
+ res = LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
+ } else {
+ uint16_t type = opcode - Instruction::IGET;
+ uint16_t field_id = GetFieldId(field_info);
+ if (IsNonAliasingIField(base, field_id, type)) {
+ res = LookupValue(kNonAliasingIFieldOp, base, field_id, type);
+ } else {
+ // Get the start version that accounts for aliasing with unresolved fields of the same type
+ // and make it unique for the field by including the field_id.
+ uint16_t start_version = LookupValue(kAliasingIFieldStartVersionOp, global_memory_version_,
+ unresolved_ifield_version_[type], field_id);
+ // Find the current version from the aliasing_ifield_version_map_.
+ uint16_t memory_version = start_version;
+ auto version_it = aliasing_ifield_version_map_.find(start_version);
+ if (version_it != aliasing_ifield_version_map_.end()) {
+ memory_version = version_it->second;
+ } else {
+ // Just use the start_version.
+ }
+ res = LookupValue(kAliasingIFieldOp, base, field_id, memory_version);
+ }
+ }
+ if (opcode == Instruction::IGET_WIDE) {
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ } else {
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ return res;
+}
+
+void LocalValueNumbering::HandleIPut(MIR* mir, uint16_t opcode) {
+ uint16_t type = opcode - Instruction::IPUT;
+ int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
+ uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
+ HandleNullCheck(mir, base);
+ const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
+ if (!field_info.IsResolved()) {
+ // Unresolved fields always alias with everything of the same type.
+ // Use mir->offset as modifier; without elaborate inlining, it will be unique.
+ unresolved_ifield_version_[type] =
+ LookupValue(kUnresolvedIFieldOp, kNoValue, kNoValue, mir->offset);
+
+ // Treat fields of escaped references of the same type as potentially modified.
+    NonAliasingIFieldKey key = { 0u, 0u, type };  // lowest possible key of this type.
+ auto it = non_aliasing_ifields_.lower_bound(key), end = non_aliasing_ifields_.end();
+ while (it != end && it->type == type) {
+ it = non_aliasing_ifields_.erase(it);
+ }
+ } else if (field_info.IsVolatile()) {
+ // Nothing to do, resolved volatile fields always get a new memory version anyway and
+ // can't alias with resolved non-volatile fields.
+ } else {
+ uint16_t field_id = GetFieldId(field_info);
+ uint16_t value = (opcode == Instruction::IPUT_WIDE)
+ ? GetOperandValueWide(mir->ssa_rep->uses[0])
+ : GetOperandValue(mir->ssa_rep->uses[0]);
+ if (IsNonAliasing(base)) {
+ StoreValue(kNonAliasingIFieldOp, base, field_id, type, value);
+ } else {
+ // Get the start version that accounts for aliasing with unresolved fields of the same type
+ // and make it unique for the field by including the field_id.
+ uint16_t start_version = LookupValue(kAliasingIFieldStartVersionOp, global_memory_version_,
+ unresolved_ifield_version_[type], field_id);
+ // Find the old version from the aliasing_ifield_version_map_.
+ uint16_t old_version = start_version;
+ auto version_it = aliasing_ifield_version_map_.find(start_version);
+ if (version_it != aliasing_ifield_version_map_.end()) {
+ old_version = version_it->second;
+ }
+ // Check if the field currently contains the value, making this a NOP.
+ if (HasValue(kAliasingIFieldOp, base, field_id, old_version, value)) {
+ // This IPUT can be eliminated, it stores the same value that's already in the field.
+ // TODO: Eliminate the IPUT.
+ return;
+ }
+ // Bump the version, adding to the chain started by start_version.
+ uint16_t memory_version = LookupValue(kAliasingIFieldBumpVersionOp, old_version, base, value);
+ // Update the aliasing_ifield_version_map_ so that HandleIGet() can get the memory_version
+ // without knowing the values used to build the chain.
+ aliasing_ifield_version_map_.Overwrite(start_version, memory_version);
+ StoreValue(kAliasingIFieldOp, base, field_id, memory_version, value);
+
+ // Clear non-aliasing fields for this field_id.
+      NonAliasingIFieldKey field_key = { 0u, field_id, type };
+ auto it = non_aliasing_ifields_.lower_bound(field_key), end = non_aliasing_ifields_.end();
+ while (it != end && it->field_id == field_id) {
+ DCHECK_EQ(type, it->type);
+ it = non_aliasing_ifields_.erase(it);
+ }
+ }
+ }
+}
+
+uint16_t LocalValueNumbering::HandleSGet(MIR* mir, uint16_t opcode) {
+ const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+ uint16_t res;
+ if (!field_info.IsResolved() || field_info.IsVolatile()) {
+ // Volatile fields always get a new memory version; field id is irrelevant.
+ // Unresolved fields may be volatile, so handle them as such to be safe.
+ // Use result s_reg - will be unique.
+ res = LookupValue(kNoValue, mir->ssa_rep->defs[0], kNoValue, kNoValue);
+ } else {
+ uint16_t field_id = GetFieldId(field_info);
+ // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
+ // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
+ // to determine the version of the field.
+ uint16_t type = opcode - Instruction::SGET;
+ res = LookupValue(kResolvedSFieldOp, field_id,
+ unresolved_sfield_version_[type], global_memory_version_);
+ }
+ if (opcode == Instruction::SGET_WIDE) {
+ SetOperandValueWide(mir->ssa_rep->defs[0], res);
+ } else {
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ }
+ return res;
+}
+
+void LocalValueNumbering::HandleSPut(MIR* mir, uint16_t opcode) {
+ uint16_t type = opcode - Instruction::SPUT;
+ const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
+ if (!field_info.IsResolved()) {
+ // Unresolved fields always alias with everything of the same type.
+ // Use mir->offset as modifier; without elaborate inlining, it will be unique.
+ unresolved_sfield_version_[type] =
+ LookupValue(kUnresolvedSFieldOp, kNoValue, kNoValue, mir->offset);
+ } else if (field_info.IsVolatile()) {
+ // Nothing to do, resolved volatile fields always get a new memory version anyway and
+ // can't alias with resolved non-volatile fields.
+ } else {
+ uint16_t field_id = GetFieldId(field_info);
+ uint16_t value = (opcode == Instruction::SPUT_WIDE)
+ ? GetOperandValueWide(mir->ssa_rep->uses[0])
+ : GetOperandValue(mir->ssa_rep->uses[0]);
+ // Resolved non-volatile static fields can alias with non-resolved fields of the same type,
+ // so we need to use unresolved_sfield_version_[type] in addition to global_memory_version_
+ // to determine the version of the field.
+ StoreValue(kResolvedSFieldOp, field_id,
+ unresolved_sfield_version_[type], global_memory_version_, value);
+ }
}
uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
- uint16_t res = NO_VALUE;
+ uint16_t res = kNoValue;
uint16_t opcode = mir->dalvikInsn.opcode;
switch (opcode) {
case Instruction::NOP:
@@ -176,9 +449,14 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
// Nothing defined but the result will be unique and non-null.
if (mir->next != nullptr && mir->next->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
MarkNonAliasingNonNull(mir->next);
+ // TUNING: We could track value names stored in the array.
// The MOVE_RESULT_OBJECT will be processed next and we'll return the value name then.
}
- MakeArgsAliasing(mir);
+ // All args escaped (if references).
+ for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
+ uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
+ HandleEscapingRef(reg);
+ }
break;
case Instruction::INVOKE_DIRECT:
@@ -197,8 +475,17 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE:
if ((mir->optimization_flags & MIR_INLINED) == 0) {
- AdvanceGlobalMemory();
- MakeArgsAliasing(mir);
+ // Use mir->offset as modifier; without elaborate inlining, it will be unique.
+ global_memory_version_ = LookupValue(kInvokeMemoryVersionBumpOp, 0u, 0u, mir->offset);
+ // Make ref args aliasing.
+ for (size_t i = 0u, count = mir->ssa_rep->num_uses; i != count; ++i) {
+ uint16_t reg = GetOperandValue(mir->ssa_rep->uses[i]);
+ non_aliasing_refs_.erase(reg);
+ }
+ // All fields of escaped references need to be treated as potentially modified.
+ non_aliasing_ifields_.clear();
+ // Array elements may also have been modified via escaped array refs.
+ escaped_array_refs_.clear();
}
break;
@@ -211,13 +498,24 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
break;
case Instruction::MOVE_EXCEPTION:
case Instruction::NEW_INSTANCE:
- case Instruction::CONST_STRING:
- case Instruction::CONST_STRING_JUMBO:
case Instruction::CONST_CLASS:
case Instruction::NEW_ARRAY:
// 1 result, treat as unique each time, use result s_reg - will be unique.
res = MarkNonAliasingNonNull(mir);
break;
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ // These strings are internalized, so assign value based on the string pool index.
+ res = LookupValue(Instruction::CONST_STRING, Low16Bits(mir->dalvikInsn.vB),
+ High16Bits(mir->dalvikInsn.vB), 0);
+ SetOperandValue(mir->ssa_rep->defs[0], res);
+ null_checked_.insert(res); // May already be there.
+ // NOTE: Hacking the contents of an internalized string via reflection is possible
+ // but the behavior is undefined. Therefore, we consider the string constant and
+ // the reference non-aliasing.
+ // TUNING: We could keep this property even if the reference "escapes".
+ non_aliasing_refs_.insert(res); // May already be there.
+ break;
case Instruction::MOVE_RESULT_WIDE:
// 1 wide result, treat as unique each time, use result s_reg - will be unique.
res = GetOperandValueWide(mir->ssa_rep->defs[0]);
@@ -255,7 +553,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::CONST_4:
case Instruction::CONST_16:
res = LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
- High16Bits(mir->dalvikInsn.vB >> 16), 0);
+ High16Bits(mir->dalvikInsn.vB), 0);
SetOperandValue(mir->ssa_rep->defs[0], res);
break;
@@ -310,7 +608,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::FLOAT_TO_INT: {
// res = op + 1 operand
uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
- res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+ res = LookupValue(opcode, operand1, kNoValue, kNoValue);
SetOperandValue(mir->ssa_rep->defs[0], res);
}
break;
@@ -320,8 +618,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::DOUBLE_TO_FLOAT:
case Instruction::DOUBLE_TO_INT: {
// res = op + 1 wide operand
- uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
- res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+ uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
+ res = LookupValue(opcode, operand1, kNoValue, kNoValue);
SetOperandValue(mir->ssa_rep->defs[0], res);
}
break;
@@ -334,7 +632,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::NEG_DOUBLE: {
// wide res = op + 1 wide operand
uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
- res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+ res = LookupValue(opcode, operand1, kNoValue, kNoValue);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
}
break;
@@ -344,8 +642,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::INT_TO_DOUBLE:
case Instruction::INT_TO_LONG: {
// wide res = op + 1 operand
- uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
- res = LookupValue(opcode, operand1, NO_VALUE, NO_VALUE);
+ uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
+ res = LookupValue(opcode, operand1, kNoValue, kNoValue);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
}
break;
@@ -356,7 +654,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
// res = op + 2 wide operands
uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
- res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ res = LookupValue(opcode, operand1, operand2, kNoValue);
SetOperandValue(mir->ssa_rep->defs[0], res);
}
break;
@@ -388,7 +686,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
// res = op + 2 operands
uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
- res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ res = LookupValue(opcode, operand1, operand2, kNoValue);
SetOperandValue(mir->ssa_rep->defs[0], res);
}
break;
@@ -422,7 +720,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
// wide res = op + 2 wide operands
uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
- res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ res = LookupValue(opcode, operand1, operand2, kNoValue);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
}
break;
@@ -435,8 +733,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::USHR_LONG_2ADDR: {
// wide res = op + 1 wide operand + 1 operand
uint16_t operand1 = GetOperandValueWide(mir->ssa_rep->uses[0]);
- uint16_t operand2 = GetOperandValueWide(mir->ssa_rep->uses[2]);
- res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[2]);
+ res = LookupValue(opcode, operand1, operand2, kNoValue);
SetOperandValueWide(mir->ssa_rep->defs[0], res);
}
break;
@@ -454,7 +752,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
// res = op + 2 operands
uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
uint16_t operand2 = GetOperandValue(mir->ssa_rep->uses[1]);
- res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ res = LookupValue(opcode, operand1, operand2, kNoValue);
SetOperandValue(mir->ssa_rep->defs[0], res);
}
break;
@@ -481,7 +779,7 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
// Same as res = op + 2 operands, except use vC as operand 2
uint16_t operand1 = GetOperandValue(mir->ssa_rep->uses[0]);
uint16_t operand2 = LookupValue(Instruction::CONST, mir->dalvikInsn.vC, 0, 0);
- res = LookupValue(opcode, operand1, operand2, NO_VALUE);
+ res = LookupValue(opcode, operand1, operand2, kNoValue);
SetOperandValue(mir->ssa_rep->defs[0], res);
}
break;
@@ -492,21 +790,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::AGET_BOOLEAN:
case Instruction::AGET_BYTE:
case Instruction::AGET_CHAR:
- case Instruction::AGET_SHORT: {
- uint16_t type = opcode - Instruction::AGET;
- uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
- HandleNullCheck(mir, array);
- uint16_t index = GetOperandValue(mir->ssa_rep->uses[1]);
- HandleRangeCheck(mir, array, index);
- // Establish value number for loaded register. Note use of memory version.
- uint16_t memory_version = GetMemoryVersion(array, NO_VALUE, type);
- uint16_t res = LookupValue(ARRAY_REF, array, index, memory_version);
- if (opcode == Instruction::AGET_WIDE) {
- SetOperandValueWide(mir->ssa_rep->defs[0], res);
- } else {
- SetOperandValue(mir->ssa_rep->defs[0], res);
- }
- }
+ case Instruction::AGET_SHORT:
+ res = HandleAGet(mir, opcode);
break;
case Instruction::APUT_OBJECT:
@@ -517,17 +802,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::APUT_BYTE:
case Instruction::APUT_BOOLEAN:
case Instruction::APUT_SHORT:
- case Instruction::APUT_CHAR: {
- uint16_t type = opcode - Instruction::APUT;
- int array_idx = (opcode == Instruction::APUT_WIDE) ? 2 : 1;
- int index_idx = array_idx + 1;
- uint16_t array = GetOperandValue(mir->ssa_rep->uses[array_idx]);
- HandleNullCheck(mir, array);
- uint16_t index = GetOperandValue(mir->ssa_rep->uses[index_idx]);
- HandleRangeCheck(mir, array, index);
- // Rev the memory version
- AdvanceMemoryVersion(array, NO_VALUE, type);
- }
+ case Instruction::APUT_CHAR:
+ HandleAPut(mir, opcode);
break;
case Instruction::IGET_OBJECT:
@@ -536,33 +812,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::IGET_BOOLEAN:
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
- case Instruction::IGET_SHORT: {
- uint16_t type = opcode - Instruction::IGET;
- uint16_t base = GetOperandValue(mir->ssa_rep->uses[0]);
- HandleNullCheck(mir, base);
- const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
- uint16_t memory_version;
- uint16_t field_id;
- if (!field_info.IsResolved() || field_info.IsVolatile()) {
- // Volatile fields always get a new memory version; field id is irrelevant.
- // Unresolved fields may be volatile, so handle them as such to be safe.
- field_id = 0u;
- memory_version = next_memory_version_;
- ++next_memory_version_;
- } else {
- DCHECK(field_info.IsResolved());
- field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
- memory_version = std::max(unresolved_ifield_version_[type],
- GetMemoryVersion(base, field_id, type));
- }
- if (opcode == Instruction::IGET_WIDE) {
- res = LookupValue(Instruction::IGET_WIDE, base, field_id, memory_version);
- SetOperandValueWide(mir->ssa_rep->defs[0], res);
- } else {
- res = LookupValue(Instruction::IGET, base, field_id, memory_version);
- SetOperandValue(mir->ssa_rep->defs[0], res);
- }
- }
+ case Instruction::IGET_SHORT:
+ res = HandleIGet(mir, opcode);
break;
case Instruction::IPUT_OBJECT:
@@ -573,24 +824,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::IPUT_BOOLEAN:
case Instruction::IPUT_BYTE:
case Instruction::IPUT_CHAR:
- case Instruction::IPUT_SHORT: {
- uint16_t type = opcode - Instruction::IPUT;
- int base_reg = (opcode == Instruction::IPUT_WIDE) ? 2 : 1;
- uint16_t base = GetOperandValue(mir->ssa_rep->uses[base_reg]);
- HandleNullCheck(mir, base);
- const MirFieldInfo& field_info = cu_->mir_graph->GetIFieldLoweringInfo(mir);
- if (!field_info.IsResolved()) {
- // Unresolved fields always alias with everything of the same type.
- unresolved_ifield_version_[type] = next_memory_version_;
- ++next_memory_version_;
- } else if (field_info.IsVolatile()) {
- // Nothing to do, resolved volatile fields always get a new memory version anyway and
- // can't alias with resolved non-volatile fields.
- } else {
- AdvanceMemoryVersion(base, GetFieldId(field_info.DeclaringDexFile(),
- field_info.DeclaringFieldIndex()), type);
- }
- }
+ case Instruction::IPUT_SHORT:
+ HandleIPut(mir, opcode);
break;
case Instruction::SGET_OBJECT:
@@ -599,31 +834,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SGET_BOOLEAN:
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
- case Instruction::SGET_SHORT: {
- uint16_t type = opcode - Instruction::SGET;
- const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
- uint16_t memory_version;
- uint16_t field_id;
- if (!field_info.IsResolved() || field_info.IsVolatile()) {
- // Volatile fields always get a new memory version; field id is irrelevant.
- // Unresolved fields may be volatile, so handle them as such to be safe.
- field_id = 0u;
- memory_version = next_memory_version_;
- ++next_memory_version_;
- } else {
- DCHECK(field_info.IsResolved());
- field_id = GetFieldId(field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex());
- memory_version = std::max(unresolved_sfield_version_[type],
- GetMemoryVersion(NO_VALUE, field_id, type));
- }
- if (opcode == Instruction::SGET_WIDE) {
- res = LookupValue(Instruction::SGET_WIDE, NO_VALUE, field_id, memory_version);
- SetOperandValueWide(mir->ssa_rep->defs[0], res);
- } else {
- res = LookupValue(Instruction::SGET, NO_VALUE, field_id, memory_version);
- SetOperandValue(mir->ssa_rep->defs[0], res);
- }
- }
+ case Instruction::SGET_SHORT:
+ res = HandleSGet(mir, opcode);
break;
case Instruction::SPUT_OBJECT:
@@ -634,21 +846,8 @@ uint16_t LocalValueNumbering::GetValueNumber(MIR* mir) {
case Instruction::SPUT_BOOLEAN:
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
- case Instruction::SPUT_SHORT: {
- uint16_t type = opcode - Instruction::SPUT;
- const MirFieldInfo& field_info = cu_->mir_graph->GetSFieldLoweringInfo(mir);
- if (!field_info.IsResolved()) {
- // Unresolved fields always alias with everything of the same type.
- unresolved_sfield_version_[type] = next_memory_version_;
- ++next_memory_version_;
- } else if (field_info.IsVolatile()) {
- // Nothing to do, resolved volatile fields always get a new memory version anyway and
- // can't alias with resolved non-volatile fields.
- } else {
- AdvanceMemoryVersion(NO_VALUE, GetFieldId(field_info.DeclaringDexFile(),
- field_info.DeclaringFieldIndex()), type);
- }
- }
+ case Instruction::SPUT_SHORT:
+ HandleSPut(mir, opcode);
break;
}
return res;
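
Aside on the approach used above: the rewrite drops the explicit memory-version counters and instead derives versions as ordinary value names by chaining LookupValue() calls, so a load gets a fresh name exactly when a potentially aliasing store has added a link to the chain. The stand-alone sketch below is illustration only, not part of this change; TinyLvn, kGetOp and kBumpOp are made-up names, and a plain std::map stands in for the arena-backed ValueMap.

// Hypothetical, self-contained sketch of the key packing and version bump chain.
#include <cstdint>
#include <iostream>
#include <map>

class TinyLvn {
 public:
  static uint64_t BuildKey(uint16_t op, uint16_t op1, uint16_t op2, uint16_t mod) {
    return (static_cast<uint64_t>(op) << 48) | (static_cast<uint64_t>(op1) << 32) |
           (static_cast<uint64_t>(op2) << 16) | static_cast<uint64_t>(mod);
  }
  // Same key -> same value name; unseen key -> freshly allocated name.
  uint16_t Lookup(uint16_t op, uint16_t op1, uint16_t op2, uint16_t mod) {
    uint64_t key = BuildKey(op, op1, op2, mod);
    auto it = map_.find(key);
    if (it != map_.end()) {
      return it->second;
    }
    uint16_t name = ++last_value_;
    map_.emplace(key, name);
    return name;
  }
 private:
  std::map<uint64_t, uint16_t> map_;
  uint16_t last_value_ = 0u;
};

int main() {
  TinyLvn lvn;
  constexpr uint16_t kGetOp = 1u, kBumpOp = 2u;            // stand-ins, not the real constants
  uint16_t version = lvn.Lookup(kBumpOp, 0u, 0u, 0u);      // start version
  uint16_t load1 = lvn.Lookup(kGetOp, 10u, 3u, version);   // base 10, field 3
  uint16_t load2 = lvn.Lookup(kGetOp, 10u, 3u, version);   // same version -> same name
  version = lvn.Lookup(kBumpOp, version, 11u, 7u);         // aliasing store bumps the chain
  uint16_t load3 = lvn.Lookup(kGetOp, 10u, 3u, version);   // new version -> new name
  std::cout << (load1 == load2) << " " << (load1 != load3) << "\n";  // prints "1 1"
  return 0;
}
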
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 0c2b6a7e01..2a815be1cc 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -23,15 +23,33 @@
#include "utils/scoped_arena_allocator.h"
#include "utils/scoped_arena_containers.h"
-#define NO_VALUE 0xffff
-#define ARRAY_REF 0xfffe
-
namespace art {
class DexFile;
+class MirFieldInfo;
class LocalValueNumbering {
+ public:
+ LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator);
+
+ uint16_t GetValueNumber(MIR* mir);
+
+ // LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
+ static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
+ return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMIR);
+ }
+
+ // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
+ static void operator delete(void* ptr) { UNUSED(ptr); }
+
+ // Checks that the value names didn't overflow.
+ bool Good() const {
+ return last_value_ < kNoValue;
+ }
+
private:
+ static constexpr uint16_t kNoValue = 0xffffu;
+
// Field types correspond to the ordering of GET/PUT instructions; this order is the same
// for IGET, IPUT, SGET, SPUT, AGET and APUT:
// op 0
@@ -43,7 +61,7 @@ class LocalValueNumbering {
// op_SHORT 6
static constexpr size_t kFieldTypeCount = 7;
- // FieldReference represents either a unique resolved field or all unresolved fields together.
+ // FieldReference represents a unique resolved field.
struct FieldReference {
const DexFile* dex_file;
uint16_t field_idx;
@@ -58,48 +76,107 @@ class LocalValueNumbering {
}
};
- struct MemoryVersionKey {
+ // Maps field key to field id for resolved fields.
+ typedef ScopedArenaSafeMap<FieldReference, uint32_t, FieldReferenceComparator> FieldIndexMap;
+
+ struct RangeCheckKey {
+ uint16_t array;
+ uint16_t index;
+ };
+
+ struct RangeCheckKeyComparator {
+ bool operator()(const RangeCheckKey& lhs, const RangeCheckKey& rhs) const {
+ if (lhs.array != rhs.array) {
+ return lhs.array < rhs.array;
+ }
+ return lhs.index < rhs.index;
+ }
+ };
+
+ typedef ScopedArenaSet<RangeCheckKey, RangeCheckKeyComparator> RangeCheckSet;
+
+ typedef ScopedArenaSafeMap<uint16_t, uint16_t> AliasingIFieldVersionMap;
+ typedef ScopedArenaSafeMap<uint16_t, uint16_t> NonAliasingArrayVersionMap;
+
+ struct NonAliasingIFieldKey {
uint16_t base;
uint16_t field_id;
uint16_t type;
};
- struct MemoryVersionKeyComparator {
- bool operator()(const MemoryVersionKey& lhs, const MemoryVersionKey& rhs) const {
- if (lhs.base != rhs.base) {
- return lhs.base < rhs.base;
+ struct NonAliasingIFieldKeyComparator {
+ bool operator()(const NonAliasingIFieldKey& lhs, const NonAliasingIFieldKey& rhs) const {
+ // Compare the type first. This allows iterating across all the entries for a certain type
+ // as needed when we need to purge them for an unresolved field IPUT.
+ if (lhs.type != rhs.type) {
+ return lhs.type < rhs.type;
}
+ // Compare the field second. This allows iterating across all the entries for a certain
+ // field as needed when we need to purge them for an aliasing field IPUT.
if (lhs.field_id != rhs.field_id) {
return lhs.field_id < rhs.field_id;
}
- return lhs.type < rhs.type;
+ // Compare the base last.
+ return lhs.base < rhs.base;
}
};
+ // Set of instance fields still holding non-aliased values after the base has been stored.
+ typedef ScopedArenaSet<NonAliasingIFieldKey, NonAliasingIFieldKeyComparator> NonAliasingFieldSet;
+
+ struct EscapedArrayKey {
+ uint16_t base;
+ uint16_t type;
+ };
+
+ struct EscapedArrayKeyComparator {
+ bool operator()(const EscapedArrayKey& lhs, const EscapedArrayKey& rhs) const {
+ // Compare the type first. This allows iterating across all the entries for a certain type
+ // as needed when we need to purge them for an unresolved field APUT.
+ if (lhs.type != rhs.type) {
+ return lhs.type < rhs.type;
+ }
+ // Compare the base last.
+ return lhs.base < rhs.base;
+ }
+ };
+
+ // Set of previously non-aliasing array refs that escaped.
+ typedef ScopedArenaSet<EscapedArrayKey, EscapedArrayKeyComparator> EscapedArraySet;
+
// Key is s_reg, value is value name.
typedef ScopedArenaSafeMap<uint16_t, uint16_t> SregValueMap;
// Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
// Key represents a memory address, value is generation.
- typedef ScopedArenaSafeMap<MemoryVersionKey, uint16_t, MemoryVersionKeyComparator
- > MemoryVersionMap;
- // Maps field key to field id for resolved fields.
- typedef ScopedArenaSafeMap<FieldReference, uint32_t, FieldReferenceComparator> FieldIndexMap;
// A set of value names.
typedef ScopedArenaSet<uint16_t> ValueNameSet;
- public:
- static LocalValueNumbering* Create(CompilationUnit* cu) {
- std::unique_ptr<ScopedArenaAllocator> allocator(ScopedArenaAllocator::Create(&cu->arena_stack));
- void* addr = allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
- return new(addr) LocalValueNumbering(cu, allocator.release());
- }
-
static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
return (static_cast<uint64_t>(op) << 48 | static_cast<uint64_t>(operand1) << 32 |
static_cast<uint64_t>(operand2) << 16 | static_cast<uint64_t>(modifier));
};
+ static uint16_t ExtractOp(uint64_t key) {
+ return static_cast<uint16_t>(key >> 48);
+ }
+
+ static uint16_t ExtractOperand1(uint64_t key) {
+ return static_cast<uint16_t>(key >> 32);
+ }
+
+ static uint16_t ExtractOperand2(uint64_t key) {
+ return static_cast<uint16_t>(key >> 16);
+ }
+
+ static uint16_t ExtractModifier(uint64_t key) {
+ return static_cast<uint16_t>(key);
+ }
+
+ static bool EqualOpAndOperand1(uint64_t key1, uint64_t key2) {
+ return static_cast<uint32_t>(key1 >> 32) == static_cast<uint32_t>(key2 >> 32);
+ }
+
uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
uint16_t res;
uint64_t key = BuildKey(op, operand1, operand2, modifier);
@@ -107,12 +184,26 @@ class LocalValueNumbering {
if (it != value_map_.end()) {
res = it->second;
} else {
- res = value_map_.size() + 1;
+ ++last_value_;
+ res = last_value_;
value_map_.Put(key, res);
}
return res;
};
+ void StoreValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier,
+ uint16_t value) {
+ uint64_t key = BuildKey(op, operand1, operand2, modifier);
+ value_map_.Overwrite(key, value);
+ }
+
+ bool HasValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier,
+ uint16_t value) const {
+ uint64_t key = BuildKey(op, operand1, operand2, modifier);
+ ValueMap::const_iterator it = value_map_.find(key);
+ return (it != value_map_.end() && it->second == value);
+ };
+
bool ValueExists(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) const {
uint64_t key = BuildKey(op, operand1, operand2, modifier);
ValueMap::const_iterator it = value_map_.find(key);
@@ -129,13 +220,13 @@ class LocalValueNumbering {
};
uint16_t GetOperandValue(int s_reg) {
- uint16_t res = NO_VALUE;
+ uint16_t res = kNoValue;
SregValueMap::iterator it = sreg_value_map_.find(s_reg);
if (it != sreg_value_map_.end()) {
res = it->second;
} else {
// First use
- res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+ res = LookupValue(kNoValue, s_reg, kNoValue, kNoValue);
sreg_value_map_.Put(s_reg, res);
}
return res;
@@ -151,63 +242,61 @@ class LocalValueNumbering {
};
uint16_t GetOperandValueWide(int s_reg) {
- uint16_t res = NO_VALUE;
+ uint16_t res = kNoValue;
SregValueMap::iterator it = sreg_wide_value_map_.find(s_reg);
if (it != sreg_wide_value_map_.end()) {
res = it->second;
} else {
// First use
- res = LookupValue(NO_VALUE, s_reg, NO_VALUE, NO_VALUE);
+ res = LookupValue(kNoValue, s_reg, kNoValue, kNoValue);
sreg_wide_value_map_.Put(s_reg, res);
}
return res;
};
- uint16_t GetValueNumber(MIR* mir);
-
- // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
- static void operator delete(void* ptr) { UNUSED(ptr); }
-
- private:
- LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
- : cu_(cu),
- allocator_(allocator),
- sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
- sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
- value_map_(std::less<uint64_t>(), allocator->Adapter()),
- next_memory_version_(1u),
- global_memory_version_(0u),
- memory_version_map_(MemoryVersionKeyComparator(), allocator->Adapter()),
- field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
- non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
- null_checked_(std::less<uint16_t>(), allocator->Adapter()) {
- std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
- std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
- }
-
- uint16_t GetFieldId(const DexFile* dex_file, uint16_t field_idx);
- void AdvanceGlobalMemory();
- uint16_t GetMemoryVersion(uint16_t base, uint16_t field, uint16_t type);
- uint16_t AdvanceMemoryVersion(uint16_t base, uint16_t field, uint16_t type);
+ uint16_t GetFieldId(const MirFieldInfo& field_info);
uint16_t MarkNonAliasingNonNull(MIR* mir);
- void MakeArgsAliasing(MIR* mir);
+ bool IsNonAliasing(uint16_t reg);
+ bool IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type);
+ bool IsNonAliasingArray(uint16_t reg, uint16_t type);
void HandleNullCheck(MIR* mir, uint16_t reg);
void HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index);
void HandlePutObject(MIR* mir);
+ void HandleEscapingRef(uint16_t base);
+ uint16_t HandleAGet(MIR* mir, uint16_t opcode);
+ void HandleAPut(MIR* mir, uint16_t opcode);
+ uint16_t HandleIGet(MIR* mir, uint16_t opcode);
+ void HandleIPut(MIR* mir, uint16_t opcode);
+ uint16_t HandleSGet(MIR* mir, uint16_t opcode);
+ void HandleSPut(MIR* mir, uint16_t opcode);
CompilationUnit* const cu_;
- std::unique_ptr<ScopedArenaAllocator> allocator_;
+
+ // We have 32-bit last_value_ so that we can detect when we run out of value names, see Good().
+ // We usually don't check Good() until the end of LVN unless we're about to modify code.
+ uint32_t last_value_;
+
SregValueMap sreg_value_map_;
SregValueMap sreg_wide_value_map_;
ValueMap value_map_;
- uint16_t next_memory_version_;
+
+ // Data for dealing with memory clobbering and store/load aliasing.
uint16_t global_memory_version_;
uint16_t unresolved_sfield_version_[kFieldTypeCount];
uint16_t unresolved_ifield_version_[kFieldTypeCount];
- MemoryVersionMap memory_version_map_;
+ uint16_t aliasing_array_version_[kFieldTypeCount];
+ AliasingIFieldVersionMap aliasing_ifield_version_map_;
+ NonAliasingArrayVersionMap non_aliasing_array_version_map_;
FieldIndexMap field_index_map_;
// Value names of references to objects that cannot be reached through a different value name.
ValueNameSet non_aliasing_refs_;
+ // Instance fields still holding non-aliased values after the base has escaped.
+ NonAliasingFieldSet non_aliasing_ifields_;
+ // Previously non-aliasing array refs that escaped but can still be used for non-aliasing AGET.
+ EscapedArraySet escaped_array_refs_;
+
+ // Range check and null check elimination.
+ RangeCheckSet range_checked_;
ValueNameSet null_checked_;
DISALLOW_COPY_AND_ASSIGN(LocalValueNumbering);
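
Aside on the comparators above: ordering keys by type first (then field_id, then base) makes all entries for a given type, or a given (type, field_id) pair, a contiguous range in the set, so HandleIPut()/HandleAPut() can purge them with a single lower_bound() followed by an erase loop. The stand-alone sketch below is illustration only, not part of this change; Key and KeyCmp are made-up names mirroring NonAliasingIFieldKey and its comparator.

// Hypothetical sketch of the "order by type first, purge a contiguous range" pattern.
#include <cstdint>
#include <iostream>
#include <set>

struct Key {
  uint16_t base;
  uint16_t field_id;
  uint16_t type;
};

struct KeyCmp {
  bool operator()(const Key& lhs, const Key& rhs) const {
    if (lhs.type != rhs.type) {
      return lhs.type < rhs.type;
    }
    if (lhs.field_id != rhs.field_id) {
      return lhs.field_id < rhs.field_id;
    }
    return lhs.base < rhs.base;
  }
};

int main() {
  std::set<Key, KeyCmp> keys = { {1u, 1u, 0u}, {2u, 1u, 0u}, {1u, 2u, 1u}, {3u, 1u, 1u}, {4u, 4u, 2u} };
  const uint16_t type = 1u;
  // Lowest possible key of this type: base and field_id zeroed, type set.
  auto it = keys.lower_bound(Key{0u, 0u, type});
  while (it != keys.end() && it->type == type) {
    it = keys.erase(it);  // Erases exactly the contiguous run of entries with this type.
  }
  std::cout << keys.size() << "\n";  // prints 3
  return 0;
}
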
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index e56e0160ca..efc4fc8a34 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -40,7 +40,7 @@ class LocalValueNumberingTest : public testing::Test {
struct MIRDef {
static constexpr size_t kMaxSsaDefs = 2;
- static constexpr size_t kMaxSsaUses = 3;
+ static constexpr size_t kMaxSsaUses = 4;
Instruction::Code opcode;
int64_t value;
@@ -55,6 +55,8 @@ class LocalValueNumberingTest : public testing::Test {
{ opcode, value, 0u, 0, { }, 1, { reg } }
#define DEF_CONST_WIDE(opcode, reg, value) \
{ opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(opcode, reg, index) \
+ { opcode, index, 0u, 0, { }, 1, { reg } }
#define DEF_IGET(opcode, reg, obj, field_info) \
{ opcode, 0u, field_info, 1, { obj }, 1, { reg } }
#define DEF_IGET_WIDE(opcode, reg, obj, field_info) \
@@ -71,6 +73,14 @@ class LocalValueNumberingTest : public testing::Test {
{ opcode, 0u, field_info, 1, { reg }, 0, { } }
#define DEF_SPUT_WIDE(opcode, reg, field_info) \
{ opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(opcode, reg, obj, idx) \
+ { opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(opcode, reg, obj, idx) \
+ { opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(opcode, reg, obj, idx) \
+ { opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(opcode, reg, obj, idx) \
+ { opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
#define DEF_INVOKE1(opcode, reg) \
{ opcode, 0u, 0u, 1, { reg }, 0, { } }
#define DEF_UNIQUE_REF(opcode, reg) \
@@ -163,6 +173,7 @@ class LocalValueNumberingTest : public testing::Test {
for (size_t i = 0; i != mir_count_; ++i) {
value_names_[i] = lvn_->GetValueNumber(&mirs_[i]);
}
+ EXPECT_TRUE(lvn_->Good());
}
LocalValueNumberingTest()
@@ -170,8 +181,11 @@ class LocalValueNumberingTest : public testing::Test {
cu_(&pool_),
mir_count_(0u),
mirs_(nullptr),
- lvn_(LocalValueNumbering::Create(&cu_)) {
+ allocator_(),
+ lvn_() {
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+ allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+ lvn_.reset(new (allocator_.get()) LocalValueNumbering(&cu_, allocator_.get()));
}
ArenaPool pool_;
@@ -180,12 +194,13 @@ class LocalValueNumberingTest : public testing::Test {
MIR* mirs_;
std::vector<SSARepresentation> ssa_reps_;
std::vector<uint16_t> value_names_;
+ std::unique_ptr<ScopedArenaAllocator> allocator_;
std::unique_ptr<LocalValueNumbering> lvn_;
};
-TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
+TEST_F(LocalValueNumberingTest, IGetIGetInvokeIGet) {
static const IFieldDef ifields[] = {
- { 1u, 1u, 1u, false }
+ { 1u, 1u, 1u, false },
};
static const MIRDef mirs[] = {
DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
@@ -206,15 +221,15 @@ TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
+TEST_F(LocalValueNumberingTest, IGetIPutIGetIGetIGet) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
{ 2u, 1u, 2u, false },
};
static const MIRDef mirs[] = {
- DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
- DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u), // May alias.
- DEF_IGET(Instruction::IGET, 2u, 10u, 0u),
+ DEF_IGET(Instruction::IGET_OBJECT, 0u, 10u, 0u),
+ DEF_IPUT(Instruction::IPUT_OBJECT, 1u, 11u, 0u), // May alias.
+ DEF_IGET(Instruction::IGET_OBJECT, 2u, 10u, 0u),
DEF_IGET(Instruction::IGET, 3u, 0u, 1u),
DEF_IGET(Instruction::IGET, 4u, 2u, 1u),
};
@@ -232,7 +247,7 @@ TEST_F(LocalValueNumberingTest, TestIGetIPutIGetIGetIGet) {
EXPECT_EQ(mirs_[4].optimization_flags, 0u);
}
-TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
+TEST_F(LocalValueNumberingTest, UniquePreserve1) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
};
@@ -253,7 +268,7 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserve1) {
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
+TEST_F(LocalValueNumberingTest, UniquePreserve2) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
};
@@ -274,7 +289,7 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserve2) {
EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
+TEST_F(LocalValueNumberingTest, UniquePreserveAndEscape) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
};
@@ -298,7 +313,7 @@ TEST_F(LocalValueNumberingTest, TestUniquePreserveAndEscape) {
EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
}
-TEST_F(LocalValueNumberingTest, TestVolatile) {
+TEST_F(LocalValueNumberingTest, Volatile) {
static const IFieldDef ifields[] = {
{ 1u, 1u, 1u, false },
{ 2u, 1u, 2u, true },
@@ -322,4 +337,264 @@ TEST_F(LocalValueNumberingTest, TestVolatile) {
EXPECT_EQ(mirs_[3].optimization_flags, 0u);
}
+TEST_F(LocalValueNumberingTest, UnresolvedIField) {
+ static const IFieldDef ifields[] = {
+ { 1u, 1u, 1u, false }, // Resolved field #1.
+ { 2u, 1u, 2u, false }, // Resolved field #2.
+ { 3u, 0u, 0u, false }, // Unresolved field.
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
+ DEF_IGET(Instruction::IGET, 1u, 20u, 0u), // Resolved field #1, unique object.
+ DEF_IGET(Instruction::IGET, 2u, 21u, 0u), // Resolved field #1.
+ DEF_IGET_WIDE(Instruction::IGET_WIDE, 3u, 21u, 1u), // Resolved field #2.
+ DEF_IGET(Instruction::IGET, 4u, 22u, 2u), // IGET doesn't clobber anything.
+ DEF_IGET(Instruction::IGET, 5u, 20u, 0u), // Resolved field #1, unique object.
+ DEF_IGET(Instruction::IGET, 6u, 21u, 0u), // Resolved field #1.
+ DEF_IGET_WIDE(Instruction::IGET_WIDE, 7u, 21u, 1u), // Resolved field #2.
+ DEF_IPUT(Instruction::IPUT, 8u, 22u, 2u), // IPUT clobbers field #1 (#2 if wide).
+ DEF_IGET(Instruction::IGET, 9u, 20u, 0u), // Resolved field #1, unique object.
+ DEF_IGET(Instruction::IGET, 10u, 21u, 0u), // Resolved field #1, new value name.
+ DEF_IGET_WIDE(Instruction::IGET_WIDE, 11u, 21u, 1u), // Resolved field #2.
+ };
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformLVN();
+ ASSERT_EQ(value_names_.size(), 12u);
+ EXPECT_EQ(value_names_[1], value_names_[5]);
+ EXPECT_EQ(value_names_[2], value_names_[6]);
+ EXPECT_EQ(value_names_[3], value_names_[7]);
+ EXPECT_EQ(value_names_[1], value_names_[9]);
+ EXPECT_NE(value_names_[2], value_names_[10]); // This aliased with unresolved IPUT.
+ EXPECT_EQ(value_names_[3], value_names_[11]);
+ EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+ EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+ EXPECT_EQ(mirs_[2].optimization_flags, 0u);
+ EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK);
+ EXPECT_EQ(mirs_[4].optimization_flags, 0u);
+ for (size_t i = 5u; i != mir_count_; ++i) {
+ EXPECT_EQ(mirs_[i].optimization_flags, MIR_IGNORE_NULL_CHECK);
+ }
+}
+
+TEST_F(LocalValueNumberingTest, UnresolvedSField) {
+ static const SFieldDef sfields[] = {
+ { 1u, 1u, 1u, false }, // Resolved field #1.
+ { 2u, 1u, 2u, false }, // Resolved field #2.
+ { 3u, 0u, 0u, false }, // Unresolved field.
+ };
+ static const MIRDef mirs[] = {
+ DEF_SGET(Instruction::SGET, 0u, 0u), // Resolved field #1.
+ DEF_SGET_WIDE(Instruction::SGET_WIDE, 1u, 1u), // Resolved field #2.
+ DEF_SGET(Instruction::SGET, 2u, 2u), // SGET doesn't clobber anything.
+ DEF_SGET(Instruction::SGET, 3u, 0u), // Resolved field #1.
+ DEF_SGET_WIDE(Instruction::SGET_WIDE, 4u, 1u), // Resolved field #2.
+ DEF_SPUT(Instruction::SPUT, 5u, 2u), // SPUT clobbers field #1 (#2 is wide).
+ DEF_SGET(Instruction::SGET, 6u, 0u), // Resolved field #1.
+ DEF_SGET_WIDE(Instruction::SGET_WIDE, 7u, 1u), // Resolved field #2.
+ };
+
+ PrepareSFields(sfields);
+ PrepareMIRs(mirs);
+ PerformLVN();
+ ASSERT_EQ(value_names_.size(), 8u);
+ EXPECT_EQ(value_names_[0], value_names_[3]);
+ EXPECT_EQ(value_names_[1], value_names_[4]);
+  EXPECT_NE(value_names_[0], value_names_[6]);  // This aliased with unresolved SPUT.
+ EXPECT_EQ(value_names_[1], value_names_[7]);
+ for (size_t i = 0u; i != mir_count_; ++i) {
+ EXPECT_EQ(mirs_[i].optimization_flags, 0u) << i;
+ }
+}
+
+TEST_F(LocalValueNumberingTest, ConstString) {
+ static const MIRDef mirs[] = {
+ DEF_CONST_STRING(Instruction::CONST_STRING, 0u, 0u),
+ DEF_CONST_STRING(Instruction::CONST_STRING, 1u, 0u),
+ DEF_CONST_STRING(Instruction::CONST_STRING, 2u, 2u),
+ DEF_CONST_STRING(Instruction::CONST_STRING, 3u, 0u),
+ DEF_INVOKE1(Instruction::INVOKE_DIRECT, 2u),
+ DEF_CONST_STRING(Instruction::CONST_STRING, 4u, 2u),
+ };
+
+ PrepareMIRs(mirs);
+ PerformLVN();
+ ASSERT_EQ(value_names_.size(), 6u);
+ EXPECT_EQ(value_names_[1], value_names_[0]);
+ EXPECT_NE(value_names_[2], value_names_[0]);
+ EXPECT_EQ(value_names_[3], value_names_[0]);
+ EXPECT_EQ(value_names_[5], value_names_[2]);
+}
+
+TEST_F(LocalValueNumberingTest, SameValueInDifferentMemoryLocations) {
+ static const IFieldDef ifields[] = {
+ { 1u, 1u, 1u, false },
+ { 2u, 1u, 2u, false },
+ };
+ static const SFieldDef sfields[] = {
+ { 3u, 1u, 3u, false },
+ };
+ static const MIRDef mirs[] = {
+ DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+ DEF_IPUT(Instruction::IPUT, 0u, 10u, 1u),
+ DEF_SPUT(Instruction::SPUT, 0u, 0u),
+ DEF_APUT(Instruction::APUT, 0u, 11u, 12u),
+ DEF_IGET(Instruction::IGET, 1u, 10u, 0u),
+ DEF_IGET(Instruction::IGET, 2u, 10u, 1u),
+ DEF_AGET(Instruction::AGET, 3u, 11u, 12u),
+ DEF_SGET(Instruction::SGET, 4u, 0u),
+ };
+
+ PrepareIFields(ifields);
+ PrepareSFields(sfields);
+ PrepareMIRs(mirs);
+ PerformLVN();
+ ASSERT_EQ(value_names_.size(), 8u);
+ EXPECT_EQ(value_names_[4], value_names_[0]);
+ EXPECT_EQ(value_names_[5], value_names_[0]);
+ EXPECT_EQ(value_names_[6], value_names_[0]);
+ EXPECT_EQ(value_names_[7], value_names_[0]);
+ EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+ EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+ EXPECT_EQ(mirs_[2].optimization_flags, 0u);
+ EXPECT_EQ(mirs_[3].optimization_flags, 0u);
+ EXPECT_EQ(mirs_[4].optimization_flags, MIR_IGNORE_NULL_CHECK);
+ EXPECT_EQ(mirs_[5].optimization_flags, MIR_IGNORE_NULL_CHECK);
+ EXPECT_EQ(mirs_[6].optimization_flags, MIR_IGNORE_NULL_CHECK | MIR_IGNORE_RANGE_CHECK);
+ EXPECT_EQ(mirs_[7].optimization_flags, 0u);
+}
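A compact sketch, with illustrative names only, of why every later load above resolves to the first IGET's value: a store merely records which value name a location holds, and a load from a location with a recorded name reuses it instead of minting a new one.

    #include <cstdint>
    #include <map>

    struct MiniLvn {
      std::map<uint64_t, uint16_t> location_to_value;  // location key -> value name

      void Store(uint64_t location_key, uint16_t value_name) {
        location_to_value[location_key] = value_name;  // Location now holds this value.
      }

      uint16_t Load(uint64_t location_key, uint16_t* next_value_name) {
        auto it = location_to_value.find(location_key);
        if (it != location_to_value.end()) {
          return it->second;                  // Reuse the recorded value name.
        }
        uint16_t fresh = ++*next_value_name;  // Unknown location: new value name.
        location_to_value[location_key] = fresh;
        return fresh;
      }
    };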
+
+TEST_F(LocalValueNumberingTest, UniqueArrayAliasing) {
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
+ DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
+    DEF_APUT(Instruction::APUT, 2u, 20u, 41u),  // May alias with the element at the index in sreg 40u.
+ DEF_AGET(Instruction::AGET, 3u, 20u, 40u),
+ };
+
+ PrepareMIRs(mirs);
+ PerformLVN();
+ ASSERT_EQ(value_names_.size(), 4u);
+ EXPECT_NE(value_names_[1], value_names_[3]);
+ EXPECT_EQ(mirs_[0].optimization_flags, 0u);
+ EXPECT_EQ(mirs_[1].optimization_flags, MIR_IGNORE_NULL_CHECK);
+ EXPECT_EQ(mirs_[2].optimization_flags, MIR_IGNORE_NULL_CHECK);
+ EXPECT_EQ(mirs_[3].optimization_flags, MIR_IGNORE_NULL_CHECK | MIR_IGNORE_RANGE_CHECK);
+}
+
+TEST_F(LocalValueNumberingTest, EscapingRefs) {
+ static const IFieldDef ifields[] = {
+ { 1u, 1u, 1u, false }, // Field #1.
+ { 2u, 1u, 2u, false }, // Field #2.
+ { 3u, 1u, 3u, false }, // Reference field for storing escaping refs.
+ { 4u, 1u, 4u, false }, // Wide.
+ { 5u, 0u, 0u, false }, // Unresolved field, int.
+ { 6u, 0u, 0u, false }, // Unresolved field, wide.
+ };
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(Instruction::NEW_INSTANCE, 20u),
+ DEF_IGET(Instruction::IGET, 1u, 20u, 0u),
+ DEF_IGET(Instruction::IGET, 2u, 20u, 1u),
+ DEF_IPUT(Instruction::IPUT_OBJECT, 20u, 30u, 2u), // Ref escapes.
+ DEF_IGET(Instruction::IGET, 4u, 20u, 0u),
+ DEF_IGET(Instruction::IGET, 5u, 20u, 1u),
+ DEF_IPUT(Instruction::IPUT, 6u, 31u, 0u), // May alias with field #1.
+ DEF_IGET(Instruction::IGET, 7u, 20u, 0u), // New value.
+ DEF_IGET(Instruction::IGET, 8u, 20u, 1u), // Still the same.
+ DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 9u, 31u, 3u), // No aliasing, different type.
+ DEF_IGET(Instruction::IGET, 10u, 20u, 0u),
+ DEF_IGET(Instruction::IGET, 11u, 20u, 1u),
+ DEF_IPUT_WIDE(Instruction::IPUT_WIDE, 12u, 31u, 5u), // No aliasing, different type.
+ DEF_IGET(Instruction::IGET, 13u, 20u, 0u),
+ DEF_IGET(Instruction::IGET, 14u, 20u, 1u),
+ DEF_IPUT(Instruction::IPUT, 15u, 31u, 4u), // Aliasing, same type.
+ DEF_IGET(Instruction::IGET, 16u, 20u, 0u),
+ DEF_IGET(Instruction::IGET, 17u, 20u, 1u),
+ };
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformLVN();
+ ASSERT_EQ(value_names_.size(), 18u);
+ EXPECT_EQ(value_names_[1], value_names_[4]);
+ EXPECT_EQ(value_names_[2], value_names_[5]);
+ EXPECT_NE(value_names_[4], value_names_[7]); // New value.
+ EXPECT_EQ(value_names_[5], value_names_[8]);
+ EXPECT_EQ(value_names_[7], value_names_[10]);
+ EXPECT_EQ(value_names_[8], value_names_[11]);
+ EXPECT_EQ(value_names_[10], value_names_[13]);
+ EXPECT_EQ(value_names_[11], value_names_[14]);
+ EXPECT_NE(value_names_[13], value_names_[16]); // New value.
+ EXPECT_NE(value_names_[14], value_names_[17]); // New value.
+ for (size_t i = 0u; i != mir_count_; ++i) {
+ int expected = (i != 0u && i != 3u && i != 6u) ? MIR_IGNORE_NULL_CHECK : 0u;
+ EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+ }
+}
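A self-contained sketch (assumed names) of the escape tracking this test depends on: a NEW_INSTANCE result stays "non-aliasing" until it is stored somewhere (the IPUT_OBJECT at mir 3 above), and only after that escape can stores through other references clobber its cached field values; as the test shows, the clobbering store must also be of a matching field type.

    #include <cstdint>
    #include <set>

    struct EscapeTracker {
      std::set<uint32_t> non_aliasing_refs;  // Provably unaliased references.
      std::set<uint32_t> escaped_refs;       // Stored somewhere; may now alias.

      void OnNewInstance(uint32_t ref) { non_aliasing_refs.insert(ref); }

      void OnRefStored(uint32_t ref) {       // e.g. the IPUT_OBJECT above.
        if (non_aliasing_refs.erase(ref) != 0u) {
          escaped_refs.insert(ref);
        }
      }

      // A store through another reference can clobber this ref's cached fields
      // only once it has escaped; type matching would be layered on top of this.
      bool MayBeClobberedByOtherStore(uint32_t ref) const {
        return escaped_refs.count(ref) != 0u;
      }
    };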
+
+TEST_F(LocalValueNumberingTest, EscapingArrayRefs) {
+ static const MIRDef mirs[] = {
+ DEF_UNIQUE_REF(Instruction::NEW_ARRAY, 20u),
+ DEF_AGET(Instruction::AGET, 1u, 20u, 40u),
+ DEF_AGET(Instruction::AGET, 2u, 20u, 41u),
+ DEF_APUT(Instruction::APUT_OBJECT, 20u, 30u, 42u), // Array ref escapes.
+ DEF_AGET(Instruction::AGET, 4u, 20u, 40u),
+ DEF_AGET(Instruction::AGET, 5u, 20u, 41u),
+ DEF_APUT_WIDE(Instruction::APUT_WIDE, 6u, 31u, 43u), // No aliasing, different type.
+ DEF_AGET(Instruction::AGET, 7u, 20u, 40u),
+ DEF_AGET(Instruction::AGET, 8u, 20u, 41u),
+ DEF_APUT(Instruction::APUT, 9u, 32u, 40u), // May alias with all elements.
+ DEF_AGET(Instruction::AGET, 10u, 20u, 40u), // New value (same index name).
+ DEF_AGET(Instruction::AGET, 11u, 20u, 41u), // New value (different index name).
+ };
+
+ PrepareMIRs(mirs);
+ PerformLVN();
+ ASSERT_EQ(value_names_.size(), 12u);
+ EXPECT_EQ(value_names_[1], value_names_[4]);
+ EXPECT_EQ(value_names_[2], value_names_[5]);
+ EXPECT_EQ(value_names_[4], value_names_[7]);
+ EXPECT_EQ(value_names_[5], value_names_[8]);
+ EXPECT_NE(value_names_[7], value_names_[10]); // New value.
+ EXPECT_NE(value_names_[8], value_names_[11]); // New value.
+ for (size_t i = 0u; i != mir_count_; ++i) {
+ int expected =
+ ((i != 0u && i != 3u && i != 6u && i != 9u) ? MIR_IGNORE_NULL_CHECK : 0u) |
+ ((i >= 4 && i != 6u && i != 9u) ? MIR_IGNORE_RANGE_CHECK : 0u);
+ EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+ }
+}
+
+TEST_F(LocalValueNumberingTest, StoringSameValueKeepsMemoryVersion) {
+ static const IFieldDef ifields[] = {
+ { 1u, 1u, 1u, false },
+ };
+ static const MIRDef mirs[] = {
+ DEF_IGET(Instruction::IGET, 0u, 10u, 0u),
+ DEF_IGET(Instruction::IGET, 1u, 11u, 0u),
+ DEF_IPUT(Instruction::IPUT, 1u, 11u, 0u), // Store the same value.
+ DEF_IGET(Instruction::IGET, 3u, 10u, 0u),
+ DEF_AGET(Instruction::AGET, 4u, 12u, 40u),
+ DEF_AGET(Instruction::AGET, 5u, 13u, 40u),
+ DEF_APUT(Instruction::APUT, 5u, 13u, 40u), // Store the same value.
+ DEF_AGET(Instruction::AGET, 7u, 12u, 40u),
+ };
+
+ PrepareIFields(ifields);
+ PrepareMIRs(mirs);
+ PerformLVN();
+ ASSERT_EQ(value_names_.size(), 8u);
+ EXPECT_NE(value_names_[0], value_names_[1]);
+ EXPECT_EQ(value_names_[0], value_names_[3]);
+ EXPECT_NE(value_names_[4], value_names_[5]);
+ EXPECT_EQ(value_names_[4], value_names_[7]);
+ for (size_t i = 0u; i != mir_count_; ++i) {
+ int expected =
+ ((i == 2u || i == 3u || i == 6u || i == 7u) ? MIR_IGNORE_NULL_CHECK : 0u) |
+ ((i == 6u || i == 7u) ? MIR_IGNORE_RANGE_CHECK : 0u);
+ EXPECT_EQ(expected, mirs_[i].optimization_flags) << i;
+ }
+}
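A minimal sketch, with illustrative names, of the behavior the test name describes: when an IPUT/APUT writes the value name the target location is already known to hold, memory is unchanged, so no version bump occurs and the re-loads from the untouched objects (mirs 3 and 7) keep the value names of mirs 0 and 4.

    #include <cstdint>
    #include <map>

    struct VersionedStore {
      std::map<uint64_t, uint16_t> location_to_value;
      uint16_t memory_version = 0u;

      void Store(uint64_t location_key, uint16_t value_name) {
        auto it = location_to_value.find(location_key);
        if (it != location_to_value.end() && it->second == value_name) {
          return;  // Same value: keep the current memory version.
        }
        ++memory_version;  // A genuinely new value invalidates other cached loads.
        location_to_value[location_key] = value_name;
      }
    };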
+
} // namespace art
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 1d4aef2183..256686ebe1 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -320,9 +320,11 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
return true;
}
bool use_lvn = bb->use_lvn;
+ std::unique_ptr<ScopedArenaAllocator> allocator;
std::unique_ptr<LocalValueNumbering> local_valnum;
if (use_lvn) {
- local_valnum.reset(LocalValueNumbering::Create(cu_));
+ allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+ local_valnum.reset(new (allocator.get()) LocalValueNumbering(cu_, allocator.get()));
}
while (bb != NULL) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -550,6 +552,9 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
}
+ if (use_lvn && UNLIKELY(!local_valnum->Good())) {
+ LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+ }
return true;
}
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 190db6092b..bf3a15eec4 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -65,6 +65,9 @@ class SafeMap {
iterator find(const K& k) { return map_.find(k); }
const_iterator find(const K& k) const { return map_.find(k); }
+ iterator lower_bound(const K& k) { return map_.lower_bound(k); }
+ const_iterator lower_bound(const K& k) const { return map_.lower_bound(k); }
+
size_type count(const K& k) const { return map_.count(k); }
// Note that unlike std::map's operator[], this doesn't return a reference to the value.
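A hypothetical caller, not part of this patch, showing the find-or-insert pattern that exposing lower_bound enables (LookupOrAssign and next_value_name are made-up names; SafeMap forwards to std::map, so the usual ordered-map idiom applies):

    #include <cstdint>
    #include "safe_map.h"

    uint16_t LookupOrAssign(art::SafeMap<uint64_t, uint16_t>* map, uint64_t key,
                            uint16_t* next_value_name) {
      auto it = map->lower_bound(key);
      if (it != map->end() && it->first == key) {
        return it->second;                  // Already numbered.
      }
      uint16_t value = ++*next_value_name;  // Assign a fresh value name.
      map->Put(key, value);                 // A hinted insert could reuse 'it' here.
      return value;
    }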