Rewrite use/def masks to support 128 bits.
Reduce LIR memory usage by storing mask pointers in the
LIR instead of the masks themselves, using pre-defined const
masks for the common cases and allocating very few on the arena.
Change-Id: I0f6d27ef6867acd157184c8c74f9612cebfe6c16
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 86d32f4..92781b5 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -87,9 +87,9 @@
if (data_target == NULL) {
data_target = AddWordData(&literal_list_, value);
}
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
r_dest, rs_r15pc.GetReg(), 0, 0, 0, data_target);
- SetMemRefType(load_pc_rel, true, kLiteral);
AppendLIR(load_pc_rel);
return load_pc_rel;
}
@@ -670,6 +670,7 @@
if (data_target == NULL) {
data_target = AddWideData(&literal_list_, val_lo, val_hi);
}
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
if (r_dest.IsFloat()) {
res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
r_dest.GetReg(), rs_r15pc.GetReg(), 0, 0, 0, data_target);
@@ -678,7 +679,6 @@
res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
r_dest.GetLowReg(), r_dest.GetHighReg(), rs_r15pc.GetReg(), 0, 0, data_target);
}
- SetMemRefType(res, true, kLiteral);
AppendLIR(res);
}
return res;
@@ -946,7 +946,8 @@
}
// TODO: in future may need to differentiate Dalvik accesses w/ spills
- if (r_base == rs_rARM_SP) {
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK(r_base == rs_rARM_SP);
AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
}
return load;
@@ -1085,7 +1086,8 @@
}
// TODO: In future, may need to differentiate Dalvik & spill accesses
- if (r_base == rs_rARM_SP) {
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK(r_base == rs_rARM_SP);
AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
}
return store;