Merge "Revert "Support for narrow operands in "dangerous" operations.""
diff --git a/Android.mk b/Android.mk
index 7beb30f..803ba50 100644
--- a/Android.mk
+++ b/Android.mk
@@ -123,7 +123,7 @@
ifeq ($(ART_TEST_ANDROID_ROOT),)
test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
$(TEST_ART_ADB_ROOT_AND_REMOUNT)
- adb sync
+ adb sync system && adb sync data
else
test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
$(TEST_ART_ADB_ROOT_AND_REMOUNT)
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index 558c7d5..de32351 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -411,7 +411,7 @@
for (const auto& base_class_reference : base_class_references) {
size_t reference_offset = base_class_reference.first;
mirror::Class* base_class = base_class_reference.second;
- const auto& it = class_declarations.find(base_class);
+ const auto it = class_declarations.find(base_class);
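+ // find() returns the iterator by value; take a copy rather than binding a
+ // const reference to the temporary.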
if (it != class_declarations.end()) {
info_.UpdateUint32(reference_offset, it->second);
} else {
@@ -512,7 +512,7 @@
using namespace dwarf; // NOLINT. For easy access to DWARF constants.
DCHECK(!desc.empty());
- const auto& it = type_cache_.find(desc);
+ const auto it = type_cache_.find(desc);
if (it != type_cache_.end()) {
return it->second;
}
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 713d370..87ce3f6 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1656,6 +1656,34 @@
}
}
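+// Rewrites comparisons against the constants 1 and -1 as comparisons against 0,
+// which the code below handles more cheaply. For unsigned conditions, x < 1
+// becomes x == 0 and x >= 1 becomes x != 0; for signed conditions, x > -1
+// becomes x >= 0 and x <= -1 becomes x < 0. Both the condition and its
+// opposite are updated so that callers may use either form.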
+static int64_t AdjustConstantForCondition(int64_t value,
+ IfCondition* condition,
+ IfCondition* opposite) {
+ if (value == 1) {
+ if (*condition == kCondB) {
+ value = 0;
+ *condition = kCondEQ;
+ *opposite = kCondNE;
+ } else if (*condition == kCondAE) {
+ value = 0;
+ *condition = kCondNE;
+ *opposite = kCondEQ;
+ }
+ } else if (value == -1) {
+ if (*condition == kCondGT) {
+ value = 0;
+ *condition = kCondGE;
+ *opposite = kCondLT;
+ } else if (*condition == kCondLE) {
+ value = 0;
+ *condition = kCondLT;
+ *opposite = kCondGE;
+ }
+ }
+
+ return value;
+}
+
static std::pair<Condition, Condition> GenerateLongTestConstant(HCondition* condition,
bool invert,
CodeGeneratorARM* codegen) {
@@ -1669,7 +1697,7 @@
std::swap(cond, opposite);
}
- std::pair<Condition, Condition> ret;
+ std::pair<Condition, Condition> ret(EQ, NE);
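+ // (EQ, NE) is the required result for the kCondEQ/kCondBE zero cases below,
+ // which return `ret` without assigning to it.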
const Location left = locations->InAt(0);
const Location right = locations->InAt(1);
@@ -1677,7 +1705,38 @@
const Register left_high = left.AsRegisterPairHigh<Register>();
const Register left_low = left.AsRegisterPairLow<Register>();
- int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+ int64_t value = AdjustConstantForCondition(right.GetConstant()->AsLongConstant()->GetValue(),
+ &cond,
+ &opposite);
+
+ // Comparisons against 0 are common enough to deserve special attention.
+ if (value == 0) {
+ switch (cond) {
+ case kCondNE:
+ // x > 0 iff x != 0 when the comparison is unsigned.
+ case kCondA:
+ ret = std::make_pair(NE, EQ);
+ FALLTHROUGH_INTENDED;
+ case kCondEQ:
+ // x <= 0 iff x == 0 when the comparison is unsigned.
+ case kCondBE:
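+ // ORR-ing the two halves sets the Z flag iff the full 64-bit value is zero.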
+ __ orrs(IP, left_low, ShifterOperand(left_high));
+ return ret;
+ case kCondLT:
+ case kCondGE:
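+ // Only the high word matters here: x < 0 iff its high word is negative.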
+ __ cmp(left_high, ShifterOperand(0));
+ return std::make_pair(ARMCondition(cond), ARMCondition(opposite));
+ // Trivially true or false.
+ case kCondB:
+ ret = std::make_pair(NE, EQ);
+ FALLTHROUGH_INTENDED;
+ case kCondAE:
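+ // Comparing a register with itself always sets the Z flag, making the
+ // returned conditions constant: AE (unsigned x >= 0) is always true, B always false.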
+ __ cmp(left_low, ShifterOperand(left_low));
+ return ret;
+ default:
+ break;
+ }
+ }
switch (cond) {
case kCondEQ:
@@ -1837,10 +1896,14 @@
static bool CanGenerateTest(HCondition* condition, ArmAssembler* assembler) {
if (condition->GetLeft()->GetType() == Primitive::kPrimLong) {
const LocationSummary* const locations = condition->GetLocations();
- const IfCondition c = condition->GetCondition();
if (locations->InAt(1).IsConstant()) {
- const int64_t value = locations->InAt(1).GetConstant()->AsLongConstant()->GetValue();
+ IfCondition c = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+ const int64_t value = AdjustConstantForCondition(
+ Int64FromConstant(locations->InAt(1).GetConstant()),
+ &c,
+ &opposite);
ShifterOperand so;
if (c < kCondLT || c > kCondGE) {
@@ -1848,9 +1911,11 @@
// we check that the least significant half of the first input to be compared
// is in a low register (the other half is read outside an IT block), and
// the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
- // encoding can be used.
- if (!ArmAssembler::IsLowRegister(locations->InAt(0).AsRegisterPairLow<Register>()) ||
- !IsUint<8>(Low32Bits(value))) {
+ // encoding can be used; 0 is always handled, no matter what registers are
+ // used by the first input.
+ if (value != 0 &&
+ (!ArmAssembler::IsLowRegister(locations->InAt(0).AsRegisterPairLow<Register>()) ||
+ !IsUint<8>(Low32Bits(value)))) {
return false;
}
} else if (c == kCondLE || c == kCondGT) {
@@ -1877,6 +1942,329 @@
return true;
}
+static void GenerateConditionGeneric(HCondition* cond, CodeGeneratorARM* codegen) {
+ DCHECK(CanGenerateTest(cond, codegen->GetAssembler()));
+
+ const Register out = cond->GetLocations()->Out().AsRegister<Register>();
+ const auto condition = GenerateTest(cond, false, codegen);
+
+ __ mov(out, ShifterOperand(0), AL, kCcKeep);
+
+ if (ArmAssembler::IsLowRegister(out)) {
+ __ it(condition.first);
+ __ mov(out, ShifterOperand(1), condition.first);
+ } else {
+ Label done_label;
+ Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
+
+ __ b(final_label, condition.second);
+ __ LoadImmediate(out, 1);
+
+ if (done_label.IsLinked()) {
+ __ Bind(&done_label);
+ }
+ }
+}
+
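+// Materializes a 64-bit equality/inequality without branches: each half is
+// subtracted separately (no borrow is needed, since only equality with zero
+// matters) and the two differences are OR-ed; the result is zero iff both
+// halves match.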
+static void GenerateEqualLong(HCondition* cond, CodeGeneratorARM* codegen) {
+ DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong);
+
+ const LocationSummary* const locations = cond->GetLocations();
+ IfCondition condition = cond->GetCondition();
+ const Register out = locations->Out().AsRegister<Register>();
+ const Location left = locations->InAt(0);
+ const Location right = locations->InAt(1);
+ Register left_high = left.AsRegisterPairHigh<Register>();
+ Register left_low = left.AsRegisterPairLow<Register>();
+
+ if (right.IsConstant()) {
+ IfCondition opposite = cond->GetOppositeCondition();
+ const int64_t value = AdjustConstantForCondition(Int64FromConstant(right.GetConstant()),
+ &condition,
+ &opposite);
+ int32_t value_high = -High32Bits(value);
+ int32_t value_low = -Low32Bits(value);
+
+ // The output uses Location::kNoOutputOverlap.
+ if (out == left_high) {
+ std::swap(left_low, left_high);
+ std::swap(value_low, value_high);
+ }
+
+ __ AddConstant(out, left_low, value_low);
+ __ AddConstant(IP, left_high, value_high);
+ } else {
+ DCHECK(right.IsRegisterPair());
+ __ sub(IP, left_high, ShifterOperand(right.AsRegisterPairHigh<Register>()));
+ __ sub(out, left_low, ShifterOperand(right.AsRegisterPairLow<Register>()));
+ }
+
+ // Need to check after calling AdjustConstantForCondition().
+ DCHECK(condition == kCondEQ || condition == kCondNE) << condition;
+
+ if (condition == kCondNE && ArmAssembler::IsLowRegister(out)) {
+ __ orrs(out, out, ShifterOperand(IP));
+ __ it(NE);
+ __ mov(out, ShifterOperand(1), NE);
+ } else {
+ __ orr(out, out, ShifterOperand(IP));
+ codegen->GenerateConditionWithZero(condition, out, out, IP);
+ }
+}
+
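+// Emits a 64-bit compare-and-branch: the high words decide the outcome unless
+// they are equal, in which case the low words are compared as unsigned values.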
+static void GenerateLongComparesAndJumps(HCondition* cond,
+ Label* true_label,
+ Label* false_label,
+ CodeGeneratorARM* codegen) {
+ LocationSummary* locations = cond->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+ IfCondition if_cond = cond->GetCondition();
+
+ Register left_high = left.AsRegisterPairHigh<Register>();
+ Register left_low = left.AsRegisterPairLow<Register>();
+ IfCondition true_high_cond = if_cond;
+ IfCondition false_high_cond = cond->GetOppositeCondition();
+ Condition final_condition = ARMUnsignedCondition(if_cond); // unsigned on lower part
+
+ // Set the conditions for the test, remembering that == needs to be
+ // decided using the low words.
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
+ // Nothing to do.
+ break;
+ case kCondLT:
+ false_high_cond = kCondGT;
+ break;
+ case kCondLE:
+ true_high_cond = kCondLT;
+ break;
+ case kCondGT:
+ false_high_cond = kCondLT;
+ break;
+ case kCondGE:
+ true_high_cond = kCondGT;
+ break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
+ }
+ if (right.IsConstant()) {
+ int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
+ int32_t val_low = Low32Bits(value);
+ int32_t val_high = High32Bits(value);
+
+ __ CmpConstant(left_high, val_high);
+ if (if_cond == kCondNE) {
+ __ b(true_label, ARMCondition(true_high_cond));
+ } else if (if_cond == kCondEQ) {
+ __ b(false_label, ARMCondition(false_high_cond));
+ } else {
+ __ b(true_label, ARMCondition(true_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
+ }
+ // Must be equal high, so compare the lows.
+ __ CmpConstant(left_low, val_low);
+ } else {
+ Register right_high = right.AsRegisterPairHigh<Register>();
+ Register right_low = right.AsRegisterPairLow<Register>();
+
+ __ cmp(left_high, ShifterOperand(right_high));
+ if (if_cond == kCondNE) {
+ __ b(true_label, ARMCondition(true_high_cond));
+ } else if (if_cond == kCondEQ) {
+ __ b(false_label, ARMCondition(false_high_cond));
+ } else {
+ __ b(true_label, ARMCondition(true_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
+ }
+ // Must be equal high, so compare the lows.
+ __ cmp(left_low, ShifterOperand(right_low));
+ }
+ // The last comparison might be unsigned.
+ // TODO: optimize cases where this is always true/false
+ __ b(true_label, final_condition);
+}
+
+static void GenerateConditionLong(HCondition* cond, CodeGeneratorARM* codegen) {
+ DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong);
+
+ const LocationSummary* const locations = cond->GetLocations();
+ IfCondition condition = cond->GetCondition();
+ const Register out = locations->Out().AsRegister<Register>();
+ const Location left = locations->InAt(0);
+ const Location right = locations->InAt(1);
+
+ if (right.IsConstant()) {
+ IfCondition opposite = cond->GetOppositeCondition();
+
+ // Comparisons against 0 are common enough to deserve special attention.
+ if (AdjustConstantForCondition(Int64FromConstant(right.GetConstant()),
+ &condition,
+ &opposite) == 0) {
+ switch (condition) {
+ case kCondNE:
+ case kCondA:
+ if (ArmAssembler::IsLowRegister(out)) {
+ // We only care if both input registers are 0 or not.
+ __ orrs(out,
+ left.AsRegisterPairLow<Register>(),
+ ShifterOperand(left.AsRegisterPairHigh<Register>()));
+ __ it(NE);
+ __ mov(out, ShifterOperand(1), NE);
+ return;
+ }
+
+ FALLTHROUGH_INTENDED;
+ case kCondEQ:
+ case kCondBE:
+ // We only care if both input registers are 0 or not.
+ __ orr(out,
+ left.AsRegisterPairLow<Register>(),
+ ShifterOperand(left.AsRegisterPairHigh<Register>()));
+ codegen->GenerateConditionWithZero(condition, out, out);
+ return;
+ case kCondLT:
+ case kCondGE:
+ // We only care about the sign bit.
+ FALLTHROUGH_INTENDED;
+ case kCondAE:
+ case kCondB:
+ codegen->GenerateConditionWithZero(condition, out, left.AsRegisterPairHigh<Register>());
+ return;
+ case kCondLE:
+ case kCondGT:
+ default:
+ break;
+ }
+ }
+ }
+
+ if ((condition == kCondEQ || condition == kCondNE) &&
+ // If `out` is a low register, then the GenerateConditionGeneric()
+ // function generates a shorter code sequence that is still branchless.
+ (!ArmAssembler::IsLowRegister(out) || !CanGenerateTest(cond, codegen->GetAssembler()))) {
+ GenerateEqualLong(cond, codegen);
+ return;
+ }
+
+ if (CanGenerateTest(cond, codegen->GetAssembler())) {
+ GenerateConditionGeneric(cond, codegen);
+ return;
+ }
+
+ // Convert the jumps into the result.
+ Label done_label;
+ Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
+ Label true_label, false_label;
+
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label, codegen);
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ mov(out, ShifterOperand(0));
+ __ b(final_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ mov(out, ShifterOperand(1));
+
+ if (done_label.IsLinked()) {
+ __ Bind(&done_label);
+ }
+}
+
+static void GenerateConditionIntegralOrNonPrimitive(HCondition* cond, CodeGeneratorARM* codegen) {
+ const Primitive::Type type = cond->GetLeft()->GetType();
+
+ DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
+
+ if (type == Primitive::kPrimLong) {
+ GenerateConditionLong(cond, codegen);
+ return;
+ }
+
+ const LocationSummary* const locations = cond->GetLocations();
+ IfCondition condition = cond->GetCondition();
+ Register in = locations->InAt(0).AsRegister<Register>();
+ const Register out = locations->Out().AsRegister<Register>();
+ const Location right = cond->GetLocations()->InAt(1);
+ int64_t value;
+
+ if (right.IsConstant()) {
+ IfCondition opposite = cond->GetOppositeCondition();
+
+ value = AdjustConstantForCondition(Int64FromConstant(right.GetConstant()),
+ &condition,
+ &opposite);
+
+ // Comparisons against 0 are common enough to deserve special attention.
+ if (value == 0) {
+ switch (condition) {
+ case kCondNE:
+ case kCondA:
+ if (ArmAssembler::IsLowRegister(out) && out == in) {
+ __ cmp(out, ShifterOperand(0));
+ __ it(NE);
+ __ mov(out, ShifterOperand(1), NE);
+ return;
+ }
+
+ FALLTHROUGH_INTENDED;
+ case kCondEQ:
+ case kCondBE:
+ case kCondLT:
+ case kCondGE:
+ case kCondAE:
+ case kCondB:
+ codegen->GenerateConditionWithZero(condition, out, in);
+ return;
+ case kCondLE:
+ case kCondGT:
+ default:
+ break;
+ }
+ }
+ }
+
+ if (condition == kCondEQ || condition == kCondNE) {
+ ShifterOperand operand;
+
+ if (right.IsConstant()) {
+ operand = ShifterOperand(value);
+ } else if (out == right.AsRegister<Register>()) {
+ // Avoid 32-bit instructions if possible.
+ operand = ShifterOperand(in);
+ in = right.AsRegister<Register>();
+ } else {
+ operand = ShifterOperand(right.AsRegister<Register>());
+ }
+
+ if (condition == kCondNE && ArmAssembler::IsLowRegister(out)) {
+ __ subs(out, in, operand);
+ __ it(NE);
+ __ mov(out, ShifterOperand(1), NE);
+ } else {
+ __ sub(out, in, operand);
+ codegen->GenerateConditionWithZero(condition, out, out);
+ }
+
+ return;
+ }
+
+ GenerateConditionGeneric(cond, codegen);
+}
+
static bool CanEncodeConstantAs8BitImmediate(HConstant* constant) {
const Primitive::Type type = constant->GetType();
bool ret = false;
@@ -2479,89 +2867,6 @@
void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
-void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
- Label* true_label,
- Label* false_label) {
- LocationSummary* locations = cond->GetLocations();
- Location left = locations->InAt(0);
- Location right = locations->InAt(1);
- IfCondition if_cond = cond->GetCondition();
-
- Register left_high = left.AsRegisterPairHigh<Register>();
- Register left_low = left.AsRegisterPairLow<Register>();
- IfCondition true_high_cond = if_cond;
- IfCondition false_high_cond = cond->GetOppositeCondition();
- Condition final_condition = ARMUnsignedCondition(if_cond); // unsigned on lower part
-
- // Set the conditions for the test, remembering that == needs to be
- // decided using the low words.
- switch (if_cond) {
- case kCondEQ:
- case kCondNE:
- // Nothing to do.
- break;
- case kCondLT:
- false_high_cond = kCondGT;
- break;
- case kCondLE:
- true_high_cond = kCondLT;
- break;
- case kCondGT:
- false_high_cond = kCondLT;
- break;
- case kCondGE:
- true_high_cond = kCondGT;
- break;
- case kCondB:
- false_high_cond = kCondA;
- break;
- case kCondBE:
- true_high_cond = kCondB;
- break;
- case kCondA:
- false_high_cond = kCondB;
- break;
- case kCondAE:
- true_high_cond = kCondA;
- break;
- }
- if (right.IsConstant()) {
- int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
- int32_t val_low = Low32Bits(value);
- int32_t val_high = High32Bits(value);
-
- __ CmpConstant(left_high, val_high);
- if (if_cond == kCondNE) {
- __ b(true_label, ARMCondition(true_high_cond));
- } else if (if_cond == kCondEQ) {
- __ b(false_label, ARMCondition(false_high_cond));
- } else {
- __ b(true_label, ARMCondition(true_high_cond));
- __ b(false_label, ARMCondition(false_high_cond));
- }
- // Must be equal high, so compare the lows.
- __ CmpConstant(left_low, val_low);
- } else {
- Register right_high = right.AsRegisterPairHigh<Register>();
- Register right_low = right.AsRegisterPairLow<Register>();
-
- __ cmp(left_high, ShifterOperand(right_high));
- if (if_cond == kCondNE) {
- __ b(true_label, ARMCondition(true_high_cond));
- } else if (if_cond == kCondEQ) {
- __ b(false_label, ARMCondition(false_high_cond));
- } else {
- __ b(true_label, ARMCondition(true_high_cond));
- __ b(false_label, ARMCondition(false_high_cond));
- }
- // Must be equal high, so compare the lows.
- __ cmp(left_low, ShifterOperand(right_low));
- }
- // The last comparison might be unsigned.
- // TODO: optimize cases where this is always true/false
- __ b(true_label, final_condition);
-}
-
void InstructionCodeGeneratorARM::GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target_in,
Label* false_target_in) {
@@ -2596,7 +2901,7 @@
Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in;
DCHECK_EQ(condition->InputAt(0)->GetType(), Primitive::kPrimLong);
- GenerateLongComparesAndJumps(condition, true_target, false_target);
+ GenerateLongComparesAndJumps(condition, true_target, false_target, codegen_);
if (false_target != &fallthrough_target) {
__ b(false_target);
@@ -2911,6 +3216,80 @@
__ nop();
}
+// `temp` is an extra temporary register that is used for some conditions;
+// callers may not specify it, in which case the method will use a scratch
+// register instead.
+void CodeGeneratorARM::GenerateConditionWithZero(IfCondition condition,
+ Register out,
+ Register in,
+ Register temp) {
+ switch (condition) {
+ case kCondEQ:
+ // x <= 0 iff x == 0 when the comparison is unsigned.
+ case kCondBE:
+ if (temp == kNoRegister || (ArmAssembler::IsLowRegister(out) && out != in)) {
+ temp = out;
+ }
+
+ // Avoid 32-bit instructions if possible; note that `in` and `temp` must be
+ // different as well.
+ if (ArmAssembler::IsLowRegister(in) && ArmAssembler::IsLowRegister(temp) && in != temp) {
+ // temp = - in; only 0 sets the carry flag.
+ __ rsbs(temp, in, ShifterOperand(0));
+
+ if (out == in) {
+ std::swap(in, temp);
+ }
+
+ // out = - in + in + carry = carry
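+ // (E.g. in = 0: the carry was set, so out = 1; in = 5: the carry was cleared, so out = 0.)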
+ __ adc(out, temp, ShifterOperand(in));
+ } else {
+ // If `in` is 0, then it has 32 leading zeros, and less than that otherwise.
+ __ clz(out, in);
+ // Any number less than 32 logically shifted right by 5 bits results in 0;
+ // the same operation on 32 yields 1.
+ __ Lsr(out, out, 5);
+ }
+
+ break;
+ case kCondNE:
+ // x > 0 iff x != 0 when the comparison is unsigned.
+ case kCondA:
+ if (out == in) {
+ if (temp == kNoRegister || in == temp) {
+ temp = IP;
+ }
+ } else if (temp == kNoRegister || !ArmAssembler::IsLowRegister(temp)) {
+ temp = out;
+ }
+
+ // temp = in - 1; only 0 does not set the carry flag.
+ __ subs(temp, in, ShifterOperand(1));
+ // out = in + ~temp + carry = in + (-(in - 1) - 1) + carry = in - in + 1 - 1 + carry = carry
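+ // (E.g. in = 0: the carry was cleared, so out = 0; in = 7: the carry was set, so out = 1.)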
+ __ sbc(out, in, ShifterOperand(temp));
+ break;
+ case kCondGE:
+ __ mvn(out, ShifterOperand(in));
+ in = out;
+ FALLTHROUGH_INTENDED;
+ case kCondLT:
+ // We only care about the sign bit.
+ __ Lsr(out, in, 31);
+ break;
+ case kCondAE:
+ // Trivially true.
+ __ mov(out, ShifterOperand(1));
+ break;
+ case kCondB:
+ // Trivially false.
+ __ mov(out, ShifterOperand(0));
+ break;
+ default:
+ LOG(FATAL) << "Unexpected condition " << condition;
+ UNREACHABLE();
+ }
+}
+
void LocationsBuilderARM::HandleCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
@@ -2947,48 +3326,48 @@
return;
}
- const Register out = cond->GetLocations()->Out().AsRegister<Register>();
+ const Primitive::Type type = cond->GetLeft()->GetType();
- if (ArmAssembler::IsLowRegister(out) && CanGenerateTest(cond, codegen_->GetAssembler())) {
- const auto condition = GenerateTest(cond, false, codegen_);
-
- __ it(condition.first);
- __ mov(out, ShifterOperand(1), condition.first);
- __ it(condition.second);
- __ mov(out, ShifterOperand(0), condition.second);
+ if (Primitive::IsFloatingPointType(type)) {
+ GenerateConditionGeneric(cond, codegen_);
return;
}
- // Convert the jumps into the result.
- Label done_label;
- Label* const final_label = codegen_->GetFinalLabel(cond, &done_label);
+ DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
- if (cond->InputAt(0)->GetType() == Primitive::kPrimLong) {
- Label true_label, false_label;
+ const IfCondition condition = cond->GetCondition();
- GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+ // A condition with only one boolean input, or with two boolean inputs compared by
+ // something other than equality or inequality, results from transformations done by
+ // the instruction simplifier and is handled as a regular condition with integral inputs.
+ if (type == Primitive::kPrimBoolean &&
+ cond->GetRight()->GetType() == Primitive::kPrimBoolean &&
+ (condition == kCondEQ || condition == kCondNE)) {
+ const LocationSummary* const locations = cond->GetLocations();
+ Register left = locations->InAt(0).AsRegister<Register>();
+ const Register out = locations->Out().AsRegister<Register>();
+ const Location right_loc = locations->InAt(1);
- // False case: result = 0.
- __ Bind(&false_label);
- __ LoadImmediate(out, 0);
- __ b(final_label);
+ // The constant case is handled by the instruction simplifier.
+ DCHECK(!right_loc.IsConstant());
- // True case: result = 1.
- __ Bind(&true_label);
- __ LoadImmediate(out, 1);
- } else {
- DCHECK(CanGenerateTest(cond, codegen_->GetAssembler()));
+ Register right = right_loc.AsRegister<Register>();
- const auto condition = GenerateTest(cond, false, codegen_);
+ // Avoid 32-bit instructions if possible.
+ if (out == right) {
+ std::swap(left, right);
+ }
- __ mov(out, ShifterOperand(0), AL, kCcKeep);
- __ b(final_label, condition.second);
- __ LoadImmediate(out, 1);
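+ // For booleans, left != right is exactly left ^ right; XOR-ing the result
+ // with 1 below turns it into left == right.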
+ __ eor(out, left, ShifterOperand(right));
+
+ if (condition == kCondEQ) {
+ __ eor(out, out, ShifterOperand(1));
+ }
+
+ return;
}
- if (done_label.IsLinked()) {
- __ Bind(&done_label);
- }
+ GenerateConditionIntegralOrNonPrimitive(cond, codegen_);
}
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 47e6be5..4763cbd 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -299,7 +299,6 @@
void GenerateCompareTestAndBranch(HCondition* condition,
Label* true_target,
Label* false_target);
- void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
@@ -623,6 +622,14 @@
void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ // `temp` is an extra temporary register that is used for some conditions;
+ // callers may not specify it, in which case the method will use a scratch
+ // register instead.
+ void GenerateConditionWithZero(IfCondition condition,
+ Register out,
+ Register in,
+ Register temp = kNoRegister);
+
private:
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 015e6dd..c4844c1 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -1775,6 +1775,34 @@
}
}
+static int64_t AdjustConstantForCondition(int64_t value,
+ IfCondition* condition,
+ IfCondition* opposite) {
+ if (value == 1) {
+ if (*condition == kCondB) {
+ value = 0;
+ *condition = kCondEQ;
+ *opposite = kCondNE;
+ } else if (*condition == kCondAE) {
+ value = 0;
+ *condition = kCondNE;
+ *opposite = kCondEQ;
+ }
+ } else if (value == -1) {
+ if (*condition == kCondGT) {
+ value = 0;
+ *condition = kCondGE;
+ *opposite = kCondLT;
+ } else if (*condition == kCondLE) {
+ value = 0;
+ *condition = kCondLT;
+ *opposite = kCondGE;
+ }
+ }
+
+ return value;
+}
+
static std::pair<vixl32::Condition, vixl32::Condition> GenerateLongTestConstant(
HCondition* condition,
bool invert,
@@ -1797,7 +1825,37 @@
const vixl32::Register left_high = HighRegisterFrom(left);
const vixl32::Register left_low = LowRegisterFrom(left);
- int64_t value = Int64ConstantFrom(right);
+ int64_t value = AdjustConstantForCondition(Int64ConstantFrom(right), &cond, &opposite);
+ UseScratchRegisterScope temps(codegen->GetVIXLAssembler());
+
+ // Comparisons against 0 are common enough to deserve special attention.
+ if (value == 0) {
+ switch (cond) {
+ case kCondNE:
+ // x > 0 iff x != 0 when the comparison is unsigned.
+ case kCondA:
+ ret = std::make_pair(ne, eq);
+ FALLTHROUGH_INTENDED;
+ case kCondEQ:
+ // x <= 0 iff x == 0 when the comparison is unsigned.
+ case kCondBE:
+ __ Orrs(temps.Acquire(), left_low, left_high);
+ return ret;
+ case kCondLT:
+ case kCondGE:
+ __ Cmp(left_high, 0);
+ return std::make_pair(ARMCondition(cond), ARMCondition(opposite));
+ // Trivially true or false.
+ case kCondB:
+ ret = std::make_pair(ne, eq);
+ FALLTHROUGH_INTENDED;
+ case kCondAE:
+ __ Cmp(left_low, left_low);
+ return ret;
+ default:
+ break;
+ }
+ }
switch (cond) {
case kCondEQ:
@@ -1842,8 +1900,6 @@
FALLTHROUGH_INTENDED;
case kCondGE:
case kCondLT: {
- UseScratchRegisterScope temps(codegen->GetVIXLAssembler());
-
__ Cmp(left_low, Low32Bits(value));
__ Sbcs(temps.Acquire(), left_high, High32Bits(value));
ret = std::make_pair(ARMCondition(cond), ARMCondition(opposite));
@@ -1961,18 +2017,22 @@
static bool CanGenerateTest(HCondition* condition, ArmVIXLAssembler* assembler) {
if (condition->GetLeft()->GetType() == Primitive::kPrimLong) {
const LocationSummary* const locations = condition->GetLocations();
- const IfCondition c = condition->GetCondition();
if (locations->InAt(1).IsConstant()) {
- const int64_t value = Int64ConstantFrom(locations->InAt(1));
+ IfCondition c = condition->GetCondition();
+ IfCondition opposite = condition->GetOppositeCondition();
+ const int64_t value =
+ AdjustConstantForCondition(Int64ConstantFrom(locations->InAt(1)), &c, &opposite);
if (c < kCondLT || c > kCondGE) {
// Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
// we check that the least significant half of the first input to be compared
// is in a low register (the other half is read outside an IT block), and
// the constant fits in an 8-bit unsigned integer, so that a 16-bit CMP
- // encoding can be used.
- if (!LowRegisterFrom(locations->InAt(0)).IsLow() || !IsUint<8>(Low32Bits(value))) {
+ // encoding can be used; 0 is always handled, no matter what registers are
+ // used by the first input.
+ if (value != 0 &&
+ (!LowRegisterFrom(locations->InAt(0)).IsLow() || !IsUint<8>(Low32Bits(value)))) {
return false;
}
// TODO(VIXL): The rest of the checks are there to keep the backend in sync with
@@ -1991,6 +2051,353 @@
return true;
}
+static void GenerateConditionGeneric(HCondition* cond, CodeGeneratorARMVIXL* codegen) {
+ DCHECK(CanGenerateTest(cond, codegen->GetAssembler()));
+
+ const vixl32::Register out = OutputRegister(cond);
+ const auto condition = GenerateTest(cond, false, codegen);
+
+ __ Mov(LeaveFlags, out, 0);
+
+ if (out.IsLow()) {
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(codegen->GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(condition.first);
+ __ mov(condition.first, out, 1);
+ } else {
+ vixl32::Label done_label;
+ vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
+
+ __ B(condition.second, final_label, /* far_target */ false);
+ __ Mov(out, 1);
+
+ if (done_label.IsReferenced()) {
+ __ Bind(&done_label);
+ }
+ }
+}
+
+static void GenerateEqualLong(HCondition* cond, CodeGeneratorARMVIXL* codegen) {
+ DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong);
+
+ const LocationSummary* const locations = cond->GetLocations();
+ IfCondition condition = cond->GetCondition();
+ const vixl32::Register out = OutputRegister(cond);
+ const Location left = locations->InAt(0);
+ const Location right = locations->InAt(1);
+ vixl32::Register left_high = HighRegisterFrom(left);
+ vixl32::Register left_low = LowRegisterFrom(left);
+ vixl32::Register temp;
+ UseScratchRegisterScope temps(codegen->GetVIXLAssembler());
+
+ if (right.IsConstant()) {
+ IfCondition opposite = cond->GetOppositeCondition();
+ const int64_t value = AdjustConstantForCondition(Int64ConstantFrom(right),
+ &condition,
+ &opposite);
+ Operand right_high = High32Bits(value);
+ Operand right_low = Low32Bits(value);
+
+ // The output uses Location::kNoOutputOverlap.
+ if (out.Is(left_high)) {
+ std::swap(left_low, left_high);
+ std::swap(right_low, right_high);
+ }
+
+ __ Sub(out, left_low, right_low);
+ temp = temps.Acquire();
+ __ Sub(temp, left_high, right_high);
+ } else {
+ DCHECK(right.IsRegisterPair());
+ temp = temps.Acquire();
+ __ Sub(temp, left_high, HighRegisterFrom(right));
+ __ Sub(out, left_low, LowRegisterFrom(right));
+ }
+
+ // Need to check after calling AdjustConstantForCondition().
+ DCHECK(condition == kCondEQ || condition == kCondNE) << condition;
+
+ if (condition == kCondNE && out.IsLow()) {
+ __ Orrs(out, out, temp);
+
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(codegen->GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(ne);
+ __ mov(ne, out, 1);
+ } else {
+ __ Orr(out, out, temp);
+ codegen->GenerateConditionWithZero(condition, out, out, temp);
+ }
+}
+
+static void GenerateLongComparesAndJumps(HCondition* cond,
+ vixl32::Label* true_label,
+ vixl32::Label* false_label,
+ CodeGeneratorARMVIXL* codegen) {
+ LocationSummary* locations = cond->GetLocations();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
+ IfCondition if_cond = cond->GetCondition();
+
+ vixl32::Register left_high = HighRegisterFrom(left);
+ vixl32::Register left_low = LowRegisterFrom(left);
+ IfCondition true_high_cond = if_cond;
+ IfCondition false_high_cond = cond->GetOppositeCondition();
+ vixl32::Condition final_condition = ARMUnsignedCondition(if_cond); // unsigned on lower part
+
+ // Set the conditions for the test, remembering that == needs to be
+ // decided using the low words.
+ switch (if_cond) {
+ case kCondEQ:
+ case kCondNE:
+ // Nothing to do.
+ break;
+ case kCondLT:
+ false_high_cond = kCondGT;
+ break;
+ case kCondLE:
+ true_high_cond = kCondLT;
+ break;
+ case kCondGT:
+ false_high_cond = kCondLT;
+ break;
+ case kCondGE:
+ true_high_cond = kCondGT;
+ break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
+ }
+ if (right.IsConstant()) {
+ int64_t value = Int64ConstantFrom(right);
+ int32_t val_low = Low32Bits(value);
+ int32_t val_high = High32Bits(value);
+
+ __ Cmp(left_high, val_high);
+ if (if_cond == kCondNE) {
+ __ B(ARMCondition(true_high_cond), true_label);
+ } else if (if_cond == kCondEQ) {
+ __ B(ARMCondition(false_high_cond), false_label);
+ } else {
+ __ B(ARMCondition(true_high_cond), true_label);
+ __ B(ARMCondition(false_high_cond), false_label);
+ }
+ // Must be equal high, so compare the lows.
+ __ Cmp(left_low, val_low);
+ } else {
+ vixl32::Register right_high = HighRegisterFrom(right);
+ vixl32::Register right_low = LowRegisterFrom(right);
+
+ __ Cmp(left_high, right_high);
+ if (if_cond == kCondNE) {
+ __ B(ARMCondition(true_high_cond), true_label);
+ } else if (if_cond == kCondEQ) {
+ __ B(ARMCondition(false_high_cond), false_label);
+ } else {
+ __ B(ARMCondition(true_high_cond), true_label);
+ __ B(ARMCondition(false_high_cond), false_label);
+ }
+ // Must be equal high, so compare the lows.
+ __ Cmp(left_low, right_low);
+ }
+ // The last comparison might be unsigned.
+ // TODO: optimize cases where this is always true/false
+ __ B(final_condition, true_label);
+}
+
+static void GenerateConditionLong(HCondition* cond, CodeGeneratorARMVIXL* codegen) {
+ DCHECK_EQ(cond->GetLeft()->GetType(), Primitive::kPrimLong);
+
+ const LocationSummary* const locations = cond->GetLocations();
+ IfCondition condition = cond->GetCondition();
+ const vixl32::Register out = OutputRegister(cond);
+ const Location left = locations->InAt(0);
+ const Location right = locations->InAt(1);
+
+ if (right.IsConstant()) {
+ IfCondition opposite = cond->GetOppositeCondition();
+
+ // Comparisons against 0 are common enough to deserve special attention.
+ if (AdjustConstantForCondition(Int64ConstantFrom(right), &condition, &opposite) == 0) {
+ switch (condition) {
+ case kCondNE:
+ case kCondA:
+ if (out.IsLow()) {
+ // We only care if both input registers are 0 or not.
+ __ Orrs(out, LowRegisterFrom(left), HighRegisterFrom(left));
+
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(codegen->GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(ne);
+ __ mov(ne, out, 1);
+ return;
+ }
+
+ FALLTHROUGH_INTENDED;
+ case kCondEQ:
+ case kCondBE:
+ // We only care if both input registers are 0 or not.
+ __ Orr(out, LowRegisterFrom(left), HighRegisterFrom(left));
+ codegen->GenerateConditionWithZero(condition, out, out);
+ return;
+ case kCondLT:
+ case kCondGE:
+ // We only care about the sign bit.
+ FALLTHROUGH_INTENDED;
+ case kCondAE:
+ case kCondB:
+ codegen->GenerateConditionWithZero(condition, out, HighRegisterFrom(left));
+ return;
+ case kCondLE:
+ case kCondGT:
+ default:
+ break;
+ }
+ }
+ }
+
+ if ((condition == kCondEQ || condition == kCondNE) &&
+ // If `out` is a low register, then the GenerateConditionGeneric()
+ // function generates a shorter code sequence that is still branchless.
+ (!out.IsLow() || !CanGenerateTest(cond, codegen->GetAssembler()))) {
+ GenerateEqualLong(cond, codegen);
+ return;
+ }
+
+ if (CanGenerateTest(cond, codegen->GetAssembler())) {
+ GenerateConditionGeneric(cond, codegen);
+ return;
+ }
+
+ // Convert the jumps into the result.
+ vixl32::Label done_label;
+ vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
+ vixl32::Label true_label, false_label;
+
+ GenerateLongComparesAndJumps(cond, &true_label, &false_label, codegen);
+
+ // False case: result = 0.
+ __ Bind(&false_label);
+ __ Mov(out, 0);
+ __ B(final_label);
+
+ // True case: result = 1.
+ __ Bind(&true_label);
+ __ Mov(out, 1);
+
+ if (done_label.IsReferenced()) {
+ __ Bind(&done_label);
+ }
+}
+
+static void GenerateConditionIntegralOrNonPrimitive(HCondition* cond,
+                                                    CodeGeneratorARMVIXL* codegen) {
+ const Primitive::Type type = cond->GetLeft()->GetType();
+
+ DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
+
+ if (type == Primitive::kPrimLong) {
+ GenerateConditionLong(cond, codegen);
+ return;
+ }
+
+ IfCondition condition = cond->GetCondition();
+ vixl32::Register in = InputRegisterAt(cond, 0);
+ const vixl32::Register out = OutputRegister(cond);
+ const Location right = cond->GetLocations()->InAt(1);
+ int64_t value;
+
+ if (right.IsConstant()) {
+ IfCondition opposite = cond->GetOppositeCondition();
+
+ value = AdjustConstantForCondition(Int64ConstantFrom(right), &condition, &opposite);
+
+ // Comparisons against 0 are common enough to deserve special attention.
+ if (value == 0) {
+ switch (condition) {
+ case kCondNE:
+ case kCondA:
+ if (out.IsLow() && out.Is(in)) {
+ __ Cmp(out, 0);
+
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(codegen->GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(ne);
+ __ mov(ne, out, 1);
+ return;
+ }
+
+ FALLTHROUGH_INTENDED;
+ case kCondEQ:
+ case kCondBE:
+ case kCondLT:
+ case kCondGE:
+ case kCondAE:
+ case kCondB:
+ codegen->GenerateConditionWithZero(condition, out, in);
+ return;
+ case kCondLE:
+ case kCondGT:
+ default:
+ break;
+ }
+ }
+ }
+
+ if (condition == kCondEQ || condition == kCondNE) {
+ Operand operand(0);
+
+ if (right.IsConstant()) {
+ operand = Operand::From(value);
+ } else if (out.Is(RegisterFrom(right))) {
+ // Avoid 32-bit instructions if possible.
+ operand = InputOperandAt(cond, 0);
+ in = RegisterFrom(right);
+ } else {
+ operand = InputOperandAt(cond, 1);
+ }
+
+ if (condition == kCondNE && out.IsLow()) {
+ __ Subs(out, in, operand);
+
+ // We use the scope because of the IT block that follows.
+ ExactAssemblyScope guard(codegen->GetVIXLAssembler(),
+ 2 * vixl32::k16BitT32InstructionSizeInBytes,
+ CodeBufferCheckScope::kExactSize);
+
+ __ it(ne);
+ __ mov(ne, out, 1);
+ } else {
+ __ Sub(out, in, operand);
+ codegen->GenerateConditionWithZero(condition, out, out);
+ }
+
+ return;
+ }
+
+ GenerateConditionGeneric(cond, codegen);
+}
+
static bool CanEncodeConstantAs8BitImmediate(HConstant* constant) {
const Primitive::Type type = constant->GetType();
bool ret = false;
@@ -2547,89 +2954,6 @@
void InstructionCodeGeneratorARMVIXL::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
-void InstructionCodeGeneratorARMVIXL::GenerateLongComparesAndJumps(HCondition* cond,
- vixl32::Label* true_label,
- vixl32::Label* false_label) {
- LocationSummary* locations = cond->GetLocations();
- Location left = locations->InAt(0);
- Location right = locations->InAt(1);
- IfCondition if_cond = cond->GetCondition();
-
- vixl32::Register left_high = HighRegisterFrom(left);
- vixl32::Register left_low = LowRegisterFrom(left);
- IfCondition true_high_cond = if_cond;
- IfCondition false_high_cond = cond->GetOppositeCondition();
- vixl32::Condition final_condition = ARMUnsignedCondition(if_cond); // unsigned on lower part
-
- // Set the conditions for the test, remembering that == needs to be
- // decided using the low words.
- switch (if_cond) {
- case kCondEQ:
- case kCondNE:
- // Nothing to do.
- break;
- case kCondLT:
- false_high_cond = kCondGT;
- break;
- case kCondLE:
- true_high_cond = kCondLT;
- break;
- case kCondGT:
- false_high_cond = kCondLT;
- break;
- case kCondGE:
- true_high_cond = kCondGT;
- break;
- case kCondB:
- false_high_cond = kCondA;
- break;
- case kCondBE:
- true_high_cond = kCondB;
- break;
- case kCondA:
- false_high_cond = kCondB;
- break;
- case kCondAE:
- true_high_cond = kCondA;
- break;
- }
- if (right.IsConstant()) {
- int64_t value = Int64ConstantFrom(right);
- int32_t val_low = Low32Bits(value);
- int32_t val_high = High32Bits(value);
-
- __ Cmp(left_high, val_high);
- if (if_cond == kCondNE) {
- __ B(ARMCondition(true_high_cond), true_label);
- } else if (if_cond == kCondEQ) {
- __ B(ARMCondition(false_high_cond), false_label);
- } else {
- __ B(ARMCondition(true_high_cond), true_label);
- __ B(ARMCondition(false_high_cond), false_label);
- }
- // Must be equal high, so compare the lows.
- __ Cmp(left_low, val_low);
- } else {
- vixl32::Register right_high = HighRegisterFrom(right);
- vixl32::Register right_low = LowRegisterFrom(right);
-
- __ Cmp(left_high, right_high);
- if (if_cond == kCondNE) {
- __ B(ARMCondition(true_high_cond), true_label);
- } else if (if_cond == kCondEQ) {
- __ B(ARMCondition(false_high_cond), false_label);
- } else {
- __ B(ARMCondition(true_high_cond), true_label);
- __ B(ARMCondition(false_high_cond), false_label);
- }
- // Must be equal high, so compare the lows.
- __ Cmp(left_low, right_low);
- }
- // The last comparison might be unsigned.
- // TODO: optimize cases where this is always true/false
- __ B(final_condition, true_label);
-}
-
void InstructionCodeGeneratorARMVIXL::GenerateCompareTestAndBranch(HCondition* condition,
vixl32::Label* true_target_in,
vixl32::Label* false_target_in) {
@@ -2664,7 +2988,7 @@
vixl32::Label* false_target = (false_target_in == nullptr) ? &fallthrough : false_target_in;
DCHECK_EQ(condition->InputAt(0)->GetType(), Primitive::kPrimLong);
- GenerateLongComparesAndJumps(condition, true_target, false_target);
+ GenerateLongComparesAndJumps(condition, true_target, false_target, codegen_);
if (false_target != &fallthrough) {
__ B(false_target);
@@ -2975,6 +3299,83 @@
__ Nop();
}
+// `temp` is an extra temporary register that is used for some conditions;
+// callers may not specify it, in which case the method will use a scratch
+// register instead.
+void CodeGeneratorARMVIXL::GenerateConditionWithZero(IfCondition condition,
+ vixl32::Register out,
+ vixl32::Register in,
+ vixl32::Register temp) {
+ switch (condition) {
+ case kCondEQ:
+ // x <= 0 iff x == 0 when the comparison is unsigned.
+ case kCondBE:
+ if (!temp.IsValid() || (out.IsLow() && !out.Is(in))) {
+ temp = out;
+ }
+
+ // Avoid 32-bit instructions if possible; note that `in` and `temp` must be
+ // different as well.
+ if (in.IsLow() && temp.IsLow() && !in.Is(temp)) {
+ // temp = - in; only 0 sets the carry flag.
+ __ Rsbs(temp, in, 0);
+
+ if (out.Is(in)) {
+ std::swap(in, temp);
+ }
+
+ // out = - in + in + carry = carry
+ __ Adc(out, temp, in);
+ } else {
+ // If `in` is 0, then it has 32 leading zeros, and less than that otherwise.
+ __ Clz(out, in);
+ // Any number less than 32 logically shifted right by 5 bits results in 0;
+ // the same operation on 32 yields 1.
+ __ Lsr(out, out, 5);
+ }
+
+ break;
+ case kCondNE:
+ // x > 0 iff x != 0 when the comparison is unsigned.
+ case kCondA: {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+
+ if (out.Is(in)) {
+ if (!temp.IsValid() || in.Is(temp)) {
+ temp = temps.Acquire();
+ }
+ } else if (!temp.IsValid() || !temp.IsLow()) {
+ temp = out;
+ }
+
+ // temp = in - 1; only 0 does not set the carry flag.
+ __ Subs(temp, in, 1);
+ // out = in + ~temp + carry = in + (-(in - 1) - 1) + carry = in - in + 1 - 1 + carry = carry
+ __ Sbc(out, in, temp);
+ break;
+ }
+ case kCondGE:
+ __ Mvn(out, in);
+ in = out;
+ FALLTHROUGH_INTENDED;
+ case kCondLT:
+ // We only care about the sign bit.
+ __ Lsr(out, in, 31);
+ break;
+ case kCondAE:
+ // Trivially true.
+ __ Mov(out, 1);
+ break;
+ case kCondB:
+ // Trivially false.
+ __ Mov(out, 0);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected condition " << condition;
+ UNREACHABLE();
+ }
+}
+
void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
@@ -3011,52 +3412,47 @@
return;
}
- const vixl32::Register out = OutputRegister(cond);
+ const Primitive::Type type = cond->GetLeft()->GetType();
- if (out.IsLow() && CanGenerateTest(cond, codegen_->GetAssembler())) {
- const auto condition = GenerateTest(cond, false, codegen_);
- // We use the scope because of the IT block that follows.
- ExactAssemblyScope guard(GetVIXLAssembler(),
- 4 * vixl32::k16BitT32InstructionSizeInBytes,
- CodeBufferCheckScope::kExactSize);
-
- __ it(condition.first);
- __ mov(condition.first, out, 1);
- __ it(condition.second);
- __ mov(condition.second, out, 0);
+ if (Primitive::IsFloatingPointType(type)) {
+ GenerateConditionGeneric(cond, codegen_);
return;
}
- // Convert the jumps into the result.
- vixl32::Label done_label;
- vixl32::Label* const final_label = codegen_->GetFinalLabel(cond, &done_label);
+ DCHECK(Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) << type;
- if (cond->InputAt(0)->GetType() == Primitive::kPrimLong) {
- vixl32::Label true_label, false_label;
+ const IfCondition condition = cond->GetCondition();
- GenerateLongComparesAndJumps(cond, &true_label, &false_label);
+ // A condition with only one boolean input, or with two boolean inputs compared by
+ // something other than equality or inequality, results from transformations done by
+ // the instruction simplifier and is handled as a regular condition with integral inputs.
+ if (type == Primitive::kPrimBoolean &&
+ cond->GetRight()->GetType() == Primitive::kPrimBoolean &&
+ (condition == kCondEQ || condition == kCondNE)) {
+ vixl32::Register left = InputRegisterAt(cond, 0);
+ const vixl32::Register out = OutputRegister(cond);
+ const Location right_loc = cond->GetLocations()->InAt(1);
- // False case: result = 0.
- __ Bind(&false_label);
- __ Mov(out, 0);
- __ B(final_label);
+ // The constant case is handled by the instruction simplifier.
+ DCHECK(!right_loc.IsConstant());
- // True case: result = 1.
- __ Bind(&true_label);
- __ Mov(out, 1);
- } else {
- DCHECK(CanGenerateTest(cond, codegen_->GetAssembler()));
+ vixl32::Register right = RegisterFrom(right_loc);
- const auto condition = GenerateTest(cond, false, codegen_);
+ // Avoid 32-bit instructions if possible.
+ if (out.Is(right)) {
+ std::swap(left, right);
+ }
- __ Mov(LeaveFlags, out, 0);
- __ B(condition.second, final_label, /* far_target */ false);
- __ Mov(out, 1);
+ __ Eor(out, left, right);
+
+ if (condition == kCondEQ) {
+ __ Eor(out, out, 1);
+ }
+
+ return;
}
- if (done_label.IsReferenced()) {
- __ Bind(&done_label);
- }
+ GenerateConditionIntegralOrNonPrimitive(cond, codegen_);
}
void LocationsBuilderARMVIXL::VisitEqual(HEqual* comp) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index daba9bf..1cf9923 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -401,9 +401,6 @@
void GenerateCompareTestAndBranch(HCondition* condition,
vixl::aarch32::Label* true_target,
vixl::aarch32::Label* false_target);
- void GenerateLongComparesAndJumps(HCondition* cond,
- vixl::aarch32::Label* true_label,
- vixl::aarch32::Label* false_label);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
@@ -716,6 +713,14 @@
void EmitMovwMovtPlaceholder(CodeGeneratorARMVIXL::PcRelativePatchInfo* labels,
vixl::aarch32::Register out);
+ // `temp` is an extra temporary register that is used for some conditions;
+ // callers may not specify it, in which case the method will use a scratch
+ // register instead.
+ void GenerateConditionWithZero(IfCondition condition,
+ vixl::aarch32::Register out,
+ vixl::aarch32::Register in,
+ vixl::aarch32::Register temp = vixl32::Register());
+
private:
vixl::aarch32::Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
vixl::aarch32::Register temp);
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 1df884e..e8a62aa 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -2598,11 +2598,7 @@
// We don't care about the sign bit, so shift left.
__ Lsl(out, out, 1);
__ eor(out, out, ShifterOperand(infinity));
- // If the result is 0, then it has 32 leading zeros, and less than that otherwise.
- __ clz(out, out);
- // Any number less than 32 logically shifted right by 5 bits results in 0;
- // the same operation on 32 yields 1.
- __ Lsr(out, out, 5);
+ codegen_->GenerateConditionWithZero(kCondEQ, out, out);
}
void IntrinsicLocationsBuilderARM::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -2625,11 +2621,7 @@
__ eor(out, out, ShifterOperand(infinity_high2));
// We don't care about the sign bit, so shift left.
__ orr(out, IP, ShifterOperand(out, LSL, 1));
- // If the result is 0, then it has 32 leading zeros, and less than that otherwise.
- __ clz(out, out);
- // Any number less than 32 logically shifted right by 5 bits results in 0;
- // the same operation on 32 yields 1.
- __ Lsr(out, out, 5);
+ codegen_->GenerateConditionWithZero(kCondEQ, out, out);
}
void IntrinsicLocationsBuilderARM::VisitReferenceGetReferent(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 2d9781a..ce3ba52 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2971,11 +2971,7 @@
// We don't care about the sign bit, so shift left.
__ Lsl(out, out, 1);
__ Eor(out, out, infinity);
- // If the result is 0, then it has 32 leading zeros, and less than that otherwise.
- __ Clz(out, out);
- // Any number less than 32 logically shifted right by 5 bits results in 0;
- // the same operation on 32 yields 1.
- __ Lsr(out, out, 5);
+ codegen_->GenerateConditionWithZero(kCondEQ, out, out);
}
void IntrinsicLocationsBuilderARMVIXL::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -3001,11 +2997,7 @@
__ Eor(out, out, infinity_high2);
// We don't care about the sign bit, so shift left.
__ Orr(out, temp, Operand(out, vixl32::LSL, 1));
- // If the result is 0, then it has 32 leading zeros, and less than that otherwise.
- __ Clz(out, out);
- // Any number less than 32 logically shifted right by 5 bits results in 0;
- // the same operation on 32 yields 1.
- __ Lsr(out, out, 5);
+ codegen_->GenerateConditionWithZero(kCondEQ, out, out);
}
void IntrinsicLocationsBuilderARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 5a6a20d..a0533f2 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -350,6 +350,55 @@
return true;
}
+ // Runs the DexFileLayout test twice, feeding the output of the first run to the
+ // second, and checks that both outputs are identical, i.e. that the layout is a
+ // fixed point.
+ bool DexFileLayoutFixedPointExec(std::string* error_msg) {
+ ScratchFile tmp_file;
+ std::string tmp_name = tmp_file.GetFilename();
+ size_t tmp_last_slash = tmp_name.rfind("/");
+ std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1);
+
+ // Write inputs and expected outputs.
+ std::string dex_file = tmp_dir + "classes.dex";
+ WriteFileBase64(kDexFileLayoutInputDex, dex_file.c_str());
+ std::string profile_file = tmp_dir + "primary.prof";
+ WriteFileBase64(kDexFileLayoutInputProfile, profile_file.c_str());
+ std::string output_dex = tmp_dir + "classes.dex.new";
+ std::string second_output_dex = tmp_dir + "classes.dex.new.new";
+
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+
+ // -v makes sure that the layout did not corrupt the dex file.
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, dex_file };
+ if (!::art::Exec(dexlayout_exec_argv, error_msg)) {
+ return false;
+ }
+
+ // -v makes sure that the layout did not corrupt the dex file.
+ std::vector<std::string> second_dexlayout_exec_argv =
+ { dexlayout, "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, output_dex };
+ if (!::art::Exec(second_dexlayout_exec_argv, error_msg)) {
+ return false;
+ }
+
+ bool diff_result = true;
+ std::vector<std::string> diff_exec_argv =
+ { "/usr/bin/diff", output_dex, second_output_dex };
+ if (!::art::Exec(diff_exec_argv, error_msg)) {
+ diff_result = false;
+ }
+
+ std::vector<std::string> rm_exec_argv =
+ { "/bin/rm", dex_file, profile_file, output_dex, second_output_dex };
+ if (!::art::Exec(rm_exec_argv, error_msg)) {
+ return false;
+ }
+
+ return diff_result;
+ }
+
// Runs UnreferencedCatchHandlerTest & Unreferenced0SizeCatchHandlerTest.
bool UnreferencedCatchHandlerExec(std::string* error_msg, const char* filename) {
ScratchFile tmp_file;
@@ -427,6 +476,13 @@
ASSERT_TRUE(DexFileLayoutExec(&error_msg)) << error_msg;
}
+TEST_F(DexLayoutTest, DexFileLayoutFixedPoint) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(DexFileLayoutFixedPointExec(&error_msg)) << error_msg;
+}
+
TEST_F(DexLayoutTest, UnreferencedCatchHandler) {
// Disable test on target.
TEST_DISABLED_FOR_TARGET();
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index e628a9f..d1da67f 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1252,7 +1252,39 @@
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
beqz $a0, art_quick_throw_null_pointer_exception
+ li $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
+ li $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
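+ # Lock word layout, as relied upon below: the top two bits hold the lock
+ # state, the gc bits sit below them ($t3 masks them out), the low 16 bits
+ # hold the owner thread id, and the bits in between hold the recursion
+ # count ($t8 is one count increment).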
+.Lretry_lock:
+ lw $t0, THREAD_ID_OFFSET(rSELF) # TODO: Can the thread ID really change during the loop?
+ ll $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ and $t2, $t1, $t3 # zero the gc bits
+ bnez $t2, .Lnot_unlocked # already thin locked
+ # Unlocked case - $t1: original lock word that's zero except for the read barrier bits.
+ or $t2, $t1, $t0 # $t2 holds thread id with count of 0 with preserved read barrier bits
+ sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ beqz $t2, .Lretry_lock # store failed, retry
nop
+ jalr $zero, $ra
+ sync # full (LoadLoad|LoadStore) memory barrier
+.Lnot_unlocked:
+ # $t1: original lock word, $t0: thread_id with count of 0 and zero read barrier bits
+ srl $t2, $t1, LOCK_WORD_STATE_SHIFT
+ bnez $t2, .Lslow_lock # if either of the top two bits is set, go slow path
+ xor $t2, $t1, $t0 # lock_word.ThreadId() ^ self->ThreadId()
+ andi $t2, $t2, 0xFFFF # zero top 16 bits
+ bnez $t2, .Lslow_lock # thread ids differ -> contention, go to slow path
+ # thread ids match -> recursive lock, fall through to increment the count
+ and $t2, $t1, $t3 # zero the gc bits
+ addu $t2, $t2, $t8 # increment count in lock word
+ srl $t2, $t2, LOCK_WORD_GC_STATE_SHIFT # if the first gc state bit is set, we overflowed.
+ bnez $t2, .Lslow_lock # if we overflow the count go slow path
+ addu $t2, $t1, $t8 # increment count for real
+ sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ beqz $t2, .Lretry_lock # store failed, retry
+ nop
+ jalr $zero, $ra
+ nop
+.Lslow_lock:
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
la $t9, artLockObjectFromCode
jalr $t9 # (Object* obj, Thread*)
@@ -1276,11 +1308,55 @@
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
beqz $a0, art_quick_throw_null_pointer_exception
+ li $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
+ li $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
+.Lretry_unlock:
+#ifndef USE_READ_BARRIER
+ lw $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+#else
+ ll $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0) # Need to use atomic read-modify-write for read barrier
+#endif
+ srl $t2, $t1, LOCK_WORD_STATE_SHIFT
+ bnez $t2, .Lslow_unlock # if either of the top two bits is set, go slow path
+ lw $t0, THREAD_ID_OFFSET(rSELF)
+ and $t2, $t1, $t3 # zero the gc bits
+ xor $t2, $t2, $t0 # lock_word.ThreadId() ^ self->ThreadId()
+ andi $t2, $t2, 0xFFFF # zero top 16 bits
+ bnez $t2, .Lslow_unlock # thread ids differ -> we do not own the lock, go slow path
+ and $t2, $t1, $t3 # zero the gc bits
+ bgeu $t2, $t8, .Lrecursive_thin_unlock
+ # transition to unlocked
+ nor $t2, $zero, $t3 # $t2 = LOCK_WORD_GC_STATE_MASK_SHIFTED
+ and $t2, $t1, $t2 # $t2: zero except for the preserved gc bits
+ sync # full (LoadStore|StoreStore) memory barrier
+#ifndef USE_READ_BARRIER
+ jalr $zero, $ra
+ sw $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+#else
+ sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ beqz $t2, .Lretry_unlock # store failed, retry
nop
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
+ jalr $zero, $ra
+ nop
+#endif
+.Lrecursive_thin_unlock:
+ # t1: original lock word
+ subu $t2, $t1, $t8 # decrement count
+#ifndef USE_READ_BARRIER
+ jalr $zero, $ra
+ sw $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+#else
+ sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ beqz $t2, .Lretry_unlock # store failed, retry
+ nop
+ jalr $zero, $ra
+ nop
+#endif
+.Lslow_unlock:
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
la $t9, artUnlockObjectFromCode
- jalr $t9 # (Object* obj, Thread*)
- move $a1, rSELF # pass Thread::Current
+ jalr $t9 # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
RETURN_IF_ZERO
END art_quick_unlock_object
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 40bad16..c9eeb7c 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1222,8 +1222,38 @@
*/
.extern artLockObjectFromCode
ENTRY_NO_GP art_quick_lock_object
- beq $a0, $zero, art_quick_throw_null_pointer_exception
+ beqzc $a0, art_quick_throw_null_pointer_exception
+ li $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
+ li $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
+.Lretry_lock:
+ lw $t0, THREAD_ID_OFFSET(rSELF) # TODO: Can the thread ID really change during the loop?
+ ll $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ and $t2, $t1, $t3 # zero the gc bits
+ bnezc $t2, .Lnot_unlocked # already thin locked
+ # Unlocked case - $t1: original lock word that's zero except for the read barrier bits.
+ or $t2, $t1, $t0 # $t2 holds thread id with count of 0 with preserved read barrier bits
+ sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ beqzc $t2, .Lretry_lock # store failed, retry
+ sync # full (LoadLoad|LoadStore) memory barrier
+ jic $ra, 0
+.Lnot_unlocked:
+ # $t1: original lock word, $t0: thread_id with count of 0 and zero read barrier bits
+ srl $t2, $t1, LOCK_WORD_STATE_SHIFT
+ bnezc $t2, .Lslow_lock # if either of the top two bits is set, go slow path
+ xor $t2, $t1, $t0 # lock_word.ThreadId() ^ self->ThreadId()
+ andi $t2, $t2, 0xFFFF # zero top 16 bits
+ bnezc $t2, .Lslow_lock # thread ids don't match -> contention, go to slow path
+ # otherwise they match -> recursive lock, handled below
+ and $t2, $t1, $t3 # zero the gc bits
+ addu $t2, $t2, $t8 # increment count in lock word
+ srl $t2, $t2, LOCK_WORD_STATE_SHIFT # if the first gc state bit is set, we overflowed.
+ bnezc $t2, .Lslow_lock # if we overflow the count, go slow path
+ addu $t2, $t1, $t8 # increment count for real
+ sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ beqzc $t2, .Lretry_lock # store failed, retry
nop
+ jic $ra, 0
+.Lslow_lock:
.cpsetup $t9, $t8, art_quick_lock_object
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case we block
jal artLockObjectFromCode # (Object* obj, Thread*)
@@ -1246,8 +1276,48 @@
*/
.extern artUnlockObjectFromCode
ENTRY_NO_GP art_quick_unlock_object
- beq $a0, $zero, art_quick_throw_null_pointer_exception
+ beqzc $a0, art_quick_throw_null_pointer_exception
+ li $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
+ li $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
+.Lretry_unlock:
+#ifndef USE_READ_BARRIER
+ lw $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+#else
+ ll $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0) # Need to use atomic read-modify-write for read barrier
+#endif
+ srl $t2, $t1, LOCK_WORD_STATE_SHIFT
+ bnezc $t2, .Lslow_unlock # if either of the top two bits is set, go slow path
+ lw $t0, THREAD_ID_OFFSET(rSELF)
+ and $t2, $t1, $t3 # zero the gc bits
+ xor $t2, $t2, $t0 # lock_word.ThreadId() ^ self->ThreadId()
+ andi $t2, $t2, 0xFFFF # zero top 16 bits
+ bnezc $t2, .Lslow_unlock # thread ids don't match -> take the slow path
+ and $t2, $t1, $t3 # zero the gc bits
+ bgeuc $t2, $t8, .Lrecursive_thin_unlock
+ # transition to unlocked
+ nor $t2, $zero, $t3 # $t2 = LOCK_WORD_GC_STATE_MASK_SHIFTED
+ and $t2, $t1, $t2 # $t2: zero except for the preserved gc bits
+ sync # full (LoadStore|StoreStore) memory barrier
+#ifndef USE_READ_BARRIER
+ sw $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+#else
+ sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ beqzc $t2, .Lretry_unlock # store failed, retry
nop
+#endif
+ jic $ra, 0
+.Lrecursive_thin_unlock:
+ # $t1: original lock word
+ subu $t2, $t1, $t8 # decrement count
+#ifndef USE_READ_BARRIER
+ sw $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+#else
+ sc $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
+ beqzc $t2, .Lretry_unlock # store failed, retry
+ nop
+#endif
+ jic $ra, 0
+.Lslow_unlock:
.cpsetup $t9, $t8, art_quick_unlock_object
SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case exception allocation triggers GC
jal artUnlockObjectFromCode # (Object* obj, Thread*)
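The mips64 stubs mirror the mips32 ones, using R6 compact branches (beqzc/bnezc/jic) instead of delay slots. For symmetry, a hedged sketch of the unlock fast path under the same illustrative layout as the locking sketch above; in the USE_READ_BARRIER build the plain release store becomes an ll/sc retry loop, which the weak CAS models:

    #include <atomic>
    #include <cstdint>

    // Same illustrative layout as in the locking sketch above.
    constexpr uint32_t kStateShift = 30;
    constexpr uint32_t kThinLockCountOne = 1u << 16;
    constexpr uint32_t kGcStateMaskShiftedToggled = ~(3u << 28);

    // Returns true if the unlock completed inline; false means "take the slow path".
    bool ThinUnlockFastPath(std::atomic<uint32_t>& lock_word, uint32_t thread_id) {
      for (;;) {
        uint32_t old_word = lock_word.load(std::memory_order_relaxed);  // lw or ll
        if ((old_word >> kStateShift) != 0) {
          return false;  // fat lock or hash code: slow path
        }
        uint32_t no_gc_bits = old_word & kGcStateMaskShiftedToggled;
        if (((no_gc_bits ^ thread_id) & 0xFFFF) != 0) {
          return false;  // we are not the owner: slow path
        }
        uint32_t new_word = (no_gc_bits >= kThinLockCountOne)
            ? old_word - kThinLockCountOne             // recursive unlock: drop one count
            : old_word & ~kGcStateMaskShiftedToggled;  // full unlock: keep only the gc bits
        // The 'sync' before the store is the release barrier guarding the
        // transition to unlocked; with a read barrier the plain sw becomes
        // an ll/sc loop, modeled here by the weak CAS.
        if (lock_word.compare_exchange_weak(old_word, new_word,
                                            std::memory_order_release)) {
          return true;
        }
      }
    }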
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 668fb4b..ef4fa28 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -981,7 +981,46 @@
// TODO: Switch to standard begin and end to use a range-based loop.
for (auto* it = allocation_stack_->Begin(), *end = allocation_stack_->End(); it < end; ++it) {
mirror::Object* const obj = it->AsMirrorPtr();
- if (obj != nullptr && obj->GetClass() != nullptr) {
+
+ mirror::Class* kls = nullptr;
+ if (obj != nullptr && (kls = obj->GetClass()) != nullptr) {
+ // The invariant check below is safe regardless of what space the object is in.
+ // For speed reasons, only perform it when rosalloc could possibly be used.
+ // (It is disabled for read barriers because that configuration never uses rosalloc;
+ // see the DCHECK in the RosAllocSpace constructor.)
+ if (!kUseReadBarrier) {
+ // Rosalloc has a race in allocation. Objects can be written into the allocation
+ // stack before their header writes are visible to this thread.
+ // See b/28790624 for more details.
+ //
+ // obj.class will either be pointing to a valid Class*, or it will point
+ // to a rosalloc free buffer.
+ //
+ // If it's pointing to a valid Class* then that Class's Class will be the
+ // ClassClass (whose Class is itself).
+ //
+ // A rosalloc free buffer will point to another rosalloc free buffer
+ // (or to null), and never to itself.
+ //
+ // Either way, dereferencing it while it is non-null is safe because it
+ // will always point to another valid pointer or to null.
+ mirror::Class* klsClass = kls->GetClass();
+
+ if (klsClass == nullptr) {
+ continue;
+ } else if (klsClass->GetClass() != klsClass) {
+ continue;
+ }
+ } else {
+ // Ensure the invariant is not broken for non-rosalloc cases.
+ DCHECK(Heap::rosalloc_space_ == nullptr)
+ << "unexpected rosalloc with read barriers";
+ DCHECK(kls->GetClass() != nullptr)
+ << "invalid object: class does not have a class";
+ DCHECK_EQ(kls->GetClass()->GetClass(), kls->GetClass())
+ << "invalid object: class's class is not ClassClass";
+ }
+
// Avoid the race condition caused by the object not yet being written into the allocation
// stack or the class not yet being written in the object. Or, if
// kUseThreadLocalAllocationStack, there can be nulls on the allocation stack.
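A hedged, standalone restatement of the class-pointer invariant this block relies on; mirror::Object and mirror::Class are the real ART types, the struct below only models the pointer graph:

    // Models the pointer graph only; not the real ART types.
    struct Obj { Obj* klass; };

    // True if 'obj' looks fully published: its class chain ends in a class
    // whose class is itself (the ClassClass). A rosalloc free buffer instead
    // points to another free buffer or to null, and never to itself.
    bool LooksLikeValidObject(const Obj* obj) {
      if (obj == nullptr) return false;
      const Obj* kls = obj->klass;
      if (kls == nullptr) return false;        // header write not yet visible
      const Obj* kls_class = kls->klass;
      if (kls_class == nullptr) return false;  // free-buffer chain ending in null
      return kls_class->klass == kls_class;    // ClassClass is its own class
    }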
@@ -1413,6 +1452,9 @@
}
void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) {
+ // Need to do this before acquiring the locks since we don't want to get suspended while
+ // holding any locks.
+ ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(cause, self);
@@ -1421,14 +1463,9 @@
}
void Heap::TrimSpaces(Thread* self) {
- {
- // Need to do this before acquiring the locks since we don't want to get suspended while
- // holding any locks.
- ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
- // Pretend we are doing a GC to prevent background compaction from deleting the space we are
- // trimming.
- StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
- }
+ // Pretend we are doing a GC to prevent background compaction from deleting the space we are
+ // trimming.
+ StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim);
ScopedTrace trace(__PRETTY_FUNCTION__);
const uint64_t start_ns = NanoTime();
// Trim the managed spaces.
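The point of moving the ScopedThreadStateChange into StartGC is ordering: the state change (which may suspend the thread) must complete before gc_complete_lock_ is acquired, and C++ declaration order is what enforces that. A minimal sketch with illustrative stand-in types:

    #include <mutex>

    // Illustrative stand-ins for ART's ScopedThreadStateChange and Mutex.
    struct ScopedStateChange {
      explicit ScopedStateChange(const char* /*state*/) { /* may suspend here */ }
      ~ScopedStateChange() { /* restores the old state, may also suspend */ }
    };

    std::mutex gc_complete_lock;

    void StartGC() {
      // Declared first, so any suspension during the state change happens while
      // no locks are held. Swapping these two lines would risk suspending (and
      // blocking every other GC requester) while gc_complete_lock is held.
      ScopedStateChange tsc("kWaitingForGcToComplete");
      std::lock_guard<std::mutex> mu(gc_complete_lock);
      // ... wait for any running GC, then mark the collector as running ...
    }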
@@ -4013,7 +4050,7 @@
<< " IsVariableSize=" << c->IsVariableSize()
<< " ObjectSize=" << c->GetObjectSize()
<< " sizeof(Class)=" << sizeof(mirror::Class)
- << " klass=" << c.Ptr();
+ << verification_->DumpObjectInfo(c.Ptr(), /*tag*/ "klass");
CHECK_GE(byte_count, sizeof(mirror::Object));
}
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 7cdd7c5..ae00044 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -143,7 +143,7 @@
bool DeleteJITCodeEntryForAddress(uintptr_t address) {
Thread* self = Thread::Current();
MutexLock mu(self, g_jit_debug_mutex);
- const auto& it = g_jit_code_entries.find(address);
+ const auto it = g_jit_code_entries.find(address);
if (it == g_jit_code_entries.end()) {
return false;
}
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 4d5c9d6..220f298 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -446,7 +446,7 @@
ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData(
const std::string& profile_key,
uint32_t checksum) {
- const auto& profile_index_it = profile_key_map_.FindOrAdd(profile_key, profile_key_map_.size());
+ const auto profile_index_it = profile_key_map_.FindOrAdd(profile_key, profile_key_map_.size());
if (profile_key_map_.size() > std::numeric_limits<uint8_t>::max()) {
// Allow only 255 dex files to be profiled. This allows us to save bytes
// when encoding. The number is well above what we expect for normal applications.
@@ -480,7 +480,7 @@
const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
const std::string& profile_key) const {
- const auto& profile_index_it = profile_key_map_.find(profile_key);
+ const auto profile_index_it = profile_key_map_.find(profile_key);
if (profile_index_it == profile_key_map_.end()) {
return nullptr;
}
@@ -1314,7 +1314,7 @@
}
std::set<DexCacheResolvedClasses> ret;
for (const DexFileData* dex_data : info_) {
- const auto& it = key_to_location_map.find(dex_data->profile_key);
+ const auto it = key_to_location_map.find(dex_data->profile_key);
if (it != key_to_location_map.end()) {
DexCacheResolvedClasses classes(it->second, it->second, dex_data->checksum);
classes.AddClasses(dex_data->class_set.begin(), dex_data->class_set.end());
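All of the "const auto&" -> "const auto" changes in this commit share one rationale: find() returns its iterator by value, so the reference form only lifetime-extends a temporary while reading like a reference into the container. A small hedged illustration with a plain std::map standing in for ART's containers:

    #include <map>
    #include <string>

    int main() {
      std::map<std::string, int> m{{"a", 1}};

      // find() returns the iterator by value; binding a const reference merely
      // lifetime-extends that temporary. It works, but looks like a reference
      // into the container.
      const auto& ref_it = m.find("a");

      // The value form states what actually happens: a cheap iterator copy.
      const auto val_it = m.find("a");

      return (ref_it == m.end()) + (val_it == m.end());  // both usable identically
    }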
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 77554e8..e618323 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -657,7 +657,7 @@
return oat_file != nullptr;
}
-static jstring DexFile_getDexFileOutputPath(JNIEnv* env,
+static jobjectArray DexFile_getDexFileOutputPaths(JNIEnv* env,
jclass,
jstring javaFilename,
jstring javaInstructionSet) {
@@ -689,7 +689,26 @@
return nullptr;
}
- return env->NewStringUTF(best_oat_file->GetLocation().c_str());
+ std::string oat_filename = best_oat_file->GetLocation();
+ std::string vdex_filename = GetVdexFilename(best_oat_file->GetLocation());
+
+ ScopedLocalRef<jstring> jvdexFilename(env, env->NewStringUTF(vdex_filename.c_str()));
+ if (jvdexFilename.get() == nullptr) {
+ return nullptr;
+ }
+ ScopedLocalRef<jstring> joatFilename(env, env->NewStringUTF(oat_filename.c_str()));
+ if (joatFilename.get() == nullptr) {
+ return nullptr;
+ }
+
+ // Now create the output array and copy the two filenames into it.
+ jobjectArray result = env->NewObjectArray(2,
+ WellKnownClasses::java_lang_String,
+ nullptr);
+ env->SetObjectArrayElement(result, 0, jvdexFilename.get());
+ env->SetObjectArrayElement(result, 1, joatFilename.get());
+
+ return result;
}
static JNINativeMethod gMethods[] = {
@@ -726,8 +745,8 @@
NATIVE_METHOD(DexFile, isBackedByOatFile, "(Ljava/lang/Object;)Z"),
NATIVE_METHOD(DexFile, getDexFileStatus,
"(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;"),
- NATIVE_METHOD(DexFile, getDexFileOutputPath,
- "(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;")
+ NATIVE_METHOD(DexFile, getDexFileOutputPaths,
+ "(Ljava/lang/String;Ljava/lang/String;)[Ljava/lang/String;")
};
void register_dalvik_system_DexFile(JNIEnv* env) {
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 9affeb0..a816522 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1064,7 +1064,7 @@
CHECK(!oat_filename.empty()) << oat_location;
CheckLocation(oat_location);
- std::string vdex_filename = ReplaceFileExtension(oat_filename, "vdex");
+ std::string vdex_filename = GetVdexFilename(oat_filename);
// Check that the files even exist, fast-fail.
if (kIsVdexEnabled && !OS::FileExists(vdex_filename.c_str())) {
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 603bbbf..9e08b34 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -630,7 +630,7 @@
return kUpdateNotAttempted;
}
const std::string& oat_file_name = *info.Filename();
- const std::string& vdex_file_name = ReplaceFileExtension(oat_file_name, "vdex");
+ const std::string& vdex_file_name = GetVdexFilename(oat_file_name);
// dex2oat ignores missing dex files and doesn't report an error.
// Check explicitly here so we can detect the error properly.
@@ -962,7 +962,7 @@
if (file == nullptr) {
// Check to see if there is a vdex file we can make use of.
std::string error_msg;
- std::string vdex_filename = ReplaceFileExtension(filename_, "vdex");
+ std::string vdex_filename = GetVdexFilename(filename_);
std::unique_ptr<VdexFile> vdex = VdexFile::Open(vdex_filename,
/*writeable*/false,
/*low_4gb*/false,
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index e0af6e8..dd90a71 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -129,6 +129,25 @@
return dex_file;
}
+// A deleter that acts like jvmtiEnv->Deallocate so that ASan does not get tripped up.
+// TODO: We should make everything use the actual jvmtiEnv->Allocate/Deallocate functions once
+// we can figure out which env to use.
+template <typename T>
+class FakeJvmtiDeleter {
+ public:
+ FakeJvmtiDeleter() {}
+
+ FakeJvmtiDeleter(const FakeJvmtiDeleter&) = default;
+ FakeJvmtiDeleter(FakeJvmtiDeleter&&) = default;
+ FakeJvmtiDeleter& operator=(const FakeJvmtiDeleter&) = default;
+
+ template <typename U> void operator()(const U* ptr) const {
+ if (ptr != nullptr) {
+ free(const_cast<U*>(ptr));
+ }
+ }
+};
+
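Usage-wise, the deleter goes in std::unique_ptr's second template parameter so destruction calls free() on memory that came from the malloc-backed jvmti allocator, which is what keeps ASan's alloc/dealloc-mismatch checker quiet (and is why the alloc_dealloc_mismatch=0 workaround can be dropped from test/etc/run-test-jar later in this change). A hedged sketch with a stand-in deleter:

    #include <cstdlib>
    #include <memory>

    // Stand-in for FakeJvmtiDeleter above: release with free(), matching a
    // malloc-backed allocator, so ASan's mismatch check passes.
    template <typename T>
    struct FreeDeleter {
      template <typename U>
      void operator()(const U* ptr) const {
        if (ptr != nullptr) {
          free(const_cast<U*>(ptr));
        }
      }
    };

    int main() {
      auto* raw = static_cast<unsigned char*>(malloc(16));
      std::unique_ptr<const unsigned char, FreeDeleter<const unsigned char>> buf(
          raw, FreeDeleter<const unsigned char>());
      return buf == nullptr ? 1 : 0;  // freed with free(), not delete[]
    }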
struct ClassCallback : public art::ClassLoadCallback {
void ClassPreDefine(const char* descriptor,
art::Handle<art::mirror::Class> klass,
@@ -173,7 +192,8 @@
// Call all Non-retransformable agents.
jint post_no_redefine_len = 0;
unsigned char* post_no_redefine_dex_data = nullptr;
- std::unique_ptr<const unsigned char> post_no_redefine_unique_ptr(nullptr);
+ std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>
+ post_no_redefine_unique_ptr(nullptr, FakeJvmtiDeleter<const unsigned char>());
event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
self,
static_cast<JNIEnv*>(env),
@@ -190,13 +210,16 @@
post_no_redefine_dex_data = const_cast<unsigned char*>(dex_file_copy->Begin());
post_no_redefine_len = dex_file_copy->Size();
} else {
- post_no_redefine_unique_ptr = std::unique_ptr<const unsigned char>(post_no_redefine_dex_data);
+ post_no_redefine_unique_ptr =
+ std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>(
+ post_no_redefine_dex_data, FakeJvmtiDeleter<const unsigned char>());
DCHECK_GT(post_no_redefine_len, 0);
}
// Call all retransformable agents.
jint final_len = 0;
unsigned char* final_dex_data = nullptr;
- std::unique_ptr<const unsigned char> final_dex_unique_ptr(nullptr);
+ std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>
+ final_dex_unique_ptr(nullptr, FakeJvmtiDeleter<const unsigned char>());
event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
self,
static_cast<JNIEnv*>(env),
@@ -213,7 +236,9 @@
final_dex_data = post_no_redefine_dex_data;
final_len = post_no_redefine_len;
} else {
- final_dex_unique_ptr = std::unique_ptr<const unsigned char>(final_dex_data);
+ final_dex_unique_ptr =
+ std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>(
+ final_dex_data, FakeJvmtiDeleter<const unsigned char>());
DCHECK_GT(final_len, 0);
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 3697f21..c46bd8d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1017,6 +1017,30 @@
MemMap::Init();
+ // Try to reserve a dedicated fault page. This is allocated for clobbered registers and sentinels.
+ // If we cannot reserve it, log a warning.
+ // Note: We allocate this first to have a good chance of grabbing the page. The address (0xebad..)
+ // is out-of-the-way enough that it should not collide with boot image mapping.
+ // Note: Don't request an error message. That will lead to a maps dump in the case of failure,
+ // leading to logspam.
+ {
+ constexpr uintptr_t kSentinelAddr =
+ RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
+ protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
+ reinterpret_cast<uint8_t*>(kSentinelAddr),
+ kPageSize,
+ PROT_NONE,
+ /* low_4g */ true,
+ /* reuse */ false,
+ /* error_msg */ nullptr));
+ if (protected_fault_page_ == nullptr) {
+ LOG(WARNING) << "Could not reserve sentinel fault page";
+ } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
+ LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
+ protected_fault_page_.reset();
+ }
+ }
+
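In plain POSIX terms, MapAnonymous here amounts to reserving one inaccessible page at a hinted address. A hedged sketch; the address value and page size below are illustrative, not the real Context::kBadGprBase or runtime kPageSize:

    #include <sys/mman.h>

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Illustrative values only.
      constexpr uintptr_t kSentinelAddr = 0xebad6000;
      constexpr size_t kPageSize = 4096;

      // PROT_NONE: any access faults, so a jump through a clobbered register
      // produces a recognizable crash. No MAP_FIXED: take the page only if it
      // is free, then verify we actually got the requested address.
      void* page = mmap(reinterpret_cast<void*>(kSentinelAddr), kPageSize,
                        PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED) {
        std::fprintf(stderr, "Could not reserve sentinel fault page\n");
      } else if (reinterpret_cast<uintptr_t>(page) != kSentinelAddr) {
        std::fprintf(stderr, "Sentinel fault page at the wrong address\n");
        munmap(page, kPageSize);
      }
      return 0;
    }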
using Opt = RuntimeArgumentMap;
VLOG(startup) << "Runtime::Init -verbose:startup enabled";
@@ -1401,27 +1425,6 @@
callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInitialAgents);
}
- // Try to reserve a dedicated fault page. This is allocated for clobbered registers and sentinels.
- // If we cannot reserve it, log a warning.
- // Note: This is allocated last so that the heap and other things have priority, if necessary.
- {
- constexpr uintptr_t kSentinelAddr =
- RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
- protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
- reinterpret_cast<uint8_t*>(kSentinelAddr),
- kPageSize,
- PROT_NONE,
- true,
- false,
- &error_msg));
- if (protected_fault_page_ == nullptr) {
- LOG(WARNING) << "Could not reserve sentinel fault page: " << error_msg;
- } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
- LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
- protected_fault_page_.reset();
- }
- }
-
VLOG(startup) << "Runtime::Init exiting";
return true;
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 8d216ce..20a53b7 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -841,6 +841,10 @@
return true;
}
+std::string GetVdexFilename(const std::string& oat_location) {
+ return ReplaceFileExtension(oat_location, "vdex");
+}
+
static void InsertIsaDirectory(const InstructionSet isa, std::string* filename) {
// in = /foo/bar/baz
// out = /foo/bar/<isa>/baz
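For illustration, a self-contained sketch of the new helper, assuming ReplaceFileExtension swaps the text after the final '.' and appends the extension when there is none (the real utility lives in runtime/utils.cc):

    #include <iostream>
    #include <string>

    // Stand-in for ART's ReplaceFileExtension under the stated assumption.
    std::string ReplaceFileExtension(const std::string& filename, const std::string& ext) {
      size_t pos = filename.rfind('.');
      const std::string base =
          (pos == std::string::npos) ? filename : filename.substr(0, pos);
      return base + "." + ext;
    }

    std::string GetVdexFilename(const std::string& oat_location) {
      return ReplaceFileExtension(oat_location, "vdex");
    }

    int main() {
      // Prints "/system/framework/arm64/boot.vdex".
      std::cout << GetVdexFilename("/system/framework/arm64/boot.oat") << "\n";
    }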
diff --git a/runtime/utils.h b/runtime/utils.h
index 2011d9e..f1f5576 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -173,6 +173,9 @@
// Returns the system location for an image
std::string GetSystemImageFilename(const char* location, InstructionSet isa);
+// Returns the vdex filename for the given oat filename.
+std::string GetVdexFilename(const std::string& oat_filename);
+
// Returns true if the file exists.
bool FileExists(const std::string& filename);
bool FileExistsAndNotEmpty(const std::string& filename);
diff --git a/sigchainlib/Android.bp b/sigchainlib/Android.bp
index 08af254..0c64b7d 100644
--- a/sigchainlib/Android.bp
+++ b/sigchainlib/Android.bp
@@ -32,6 +32,12 @@
shared_libs: ["liblog"],
},
},
+ // Sigchainlib is whole-statically linked into binaries. For Android.mk-based binaries,
+ // this will drag ASAN symbols into the binary, even for modules using LOCAL_SANITIZE := never.
+ // So disable sanitization for now. b/38456126
+ sanitize: {
+ never: true,
+ },
}
// Create a dummy version of libsigchain which expose the necessary symbols
diff --git a/test/409-materialized-condition/src/Main.java b/test/409-materialized-condition/src/Main.java
index 0c179a9..5f21bc3 100644
--- a/test/409-materialized-condition/src/Main.java
+++ b/test/409-materialized-condition/src/Main.java
@@ -50,6 +50,49 @@
return b;
}
+ public static boolean $noinline$intEq0(int x) {
+ return x == 0;
+ }
+
+ public static boolean $noinline$intNe0(int x) {
+ return x != 0;
+ }
+
+ public static boolean $noinline$longEq0(long x) {
+ return x == 0;
+ }
+
+ public static boolean $noinline$longNe0(long x) {
+ return x != 0;
+ }
+
+ public static boolean $noinline$longEqCst(long x) {
+ return x == 0x0123456789ABCDEFL;
+ }
+
+ public static boolean $noinline$longNeCst(long x) {
+ return x != 0x0123456789ABCDEFL;
+ }
+
+ public static void assertEqual(boolean expected, boolean actual) {
+ if (expected != actual) {
+ throw new Error("Assertion failed: " + expected + " != " + actual);
+ }
+ }
+
+ // The purpose of this method is to test code generation for a materialized
+ // HCondition that is not equality or inequality, and that has one boolean
+ // input. That can't be done directly, so we have to rely on the instruction
+ // simplifier to transform the control-flow graph appropriately.
+ public static boolean $noinline$booleanCondition(boolean in) {
+ int value = in ? 1 : 0;
+
+ // Calling a non-inlineable method that uses `value` as well prevents a
+ // transformation of the return value into `false`.
+ $noinline$intNe0(value);
+ return value > 127;
+ }
+
public static void main(String[] args) {
System.out.println("foo1");
int res = foo1();
@@ -62,5 +105,49 @@
if (res != 42) {
throw new Error("Unexpected return value for foo2: " + res + ", expected 42.");
}
+
+ assertEqual($noinline$booleanCondition(false), false);
+ assertEqual($noinline$booleanCondition(true), false);
+
+ int[] int_inputs = {0, 1, -1, Integer.MIN_VALUE, Integer.MAX_VALUE, 42, -9000};
+ long[] long_inputs = {
+ 0L, 1L, -1L, Long.MIN_VALUE, Long.MAX_VALUE, 0x100000000L,
+ 0x100000001L, -9000L, 0x0123456789ABCDEFL};
+
+ boolean[] int_eq_0_expected = {true, false, false, false, false, false, false};
+
+ for (int i = 0; i < int_inputs.length; i++) {
+ assertEqual(int_eq_0_expected[i], $noinline$intEq0(int_inputs[i]));
+ }
+
+ boolean[] int_ne_0_expected = {false, true, true, true, true, true, true};
+
+ for (int i = 0; i < int_inputs.length; i++) {
+ assertEqual(int_ne_0_expected[i], $noinline$intNe0(int_inputs[i]));
+ }
+
+ boolean[] long_eq_0_expected = {true, false, false, false, false, false, false, false, false};
+
+ for (int i = 0; i < long_inputs.length; i++) {
+ assertEqual(long_eq_0_expected[i], $noinline$longEq0(long_inputs[i]));
+ }
+
+ boolean[] long_ne_0_expected = {false, true, true, true, true, true, true, true, true};
+
+ for (int i = 0; i < long_inputs.length; i++) {
+ assertEqual(long_ne_0_expected[i], $noinline$longNe0(long_inputs[i]));
+ }
+
+ boolean[] long_eq_cst_expected = {false, false, false, false, false, false, false, false, true};
+
+ for (int i = 0; i < long_inputs.length; i++) {
+ assertEqual(long_eq_cst_expected[i], $noinline$longEqCst(long_inputs[i]));
+ }
+
+ boolean[] long_ne_cst_expected = {true, true, true, true, true, true, true, true, false};
+
+ for (int i = 0; i < long_inputs.length; i++) {
+ assertEqual(long_ne_cst_expected[i], $noinline$longNeCst(long_inputs[i]));
+ }
}
}
diff --git a/test/570-checker-select/src/Main.java b/test/570-checker-select/src/Main.java
index 3ac6f89..2dad14c 100644
--- a/test/570-checker-select/src/Main.java
+++ b/test/570-checker-select/src/Main.java
@@ -414,6 +414,46 @@
return a > 0x7FFFFFFFFFFFFFFFL ? x : y;
}
+ /// CHECK-START-ARM: long Main.$noinline$LongNonmatCondCst_LongVarVar4(long, long, long) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: orrs ip, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: cmp
+ /// CHECK-NOT: sbcs
+
+ public static long $noinline$LongNonmatCondCst_LongVarVar4(long a, long x, long y) {
+ return a == 0 ? x : y;
+ }
+
+ /// CHECK-START-ARM: long Main.$noinline$LongNonmatCondCst_LongVarVar5(long, long, long) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: orrs ip, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: cmp
+ /// CHECK-NOT: sbcs
+
+ public static long $noinline$LongNonmatCondCst_LongVarVar5(long a, long x, long y) {
+ return a != 0 ? x : y;
+ }
+
+ /// CHECK-START-ARM: long Main.$noinline$LongNonmatCondCst_LongVarVar6(long, long, long) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp {{r\d+}}, #0
+ /// CHECK-NOT: cmp
+ /// CHECK-NOT: sbcs
+
+ public static long $noinline$LongNonmatCondCst_LongVarVar6(long a, long x, long y) {
+ return a >= 0 ? x : y;
+ }
+
+ /// CHECK-START-ARM: long Main.$noinline$LongNonmatCondCst_LongVarVar7(long, long, long) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp {{r\d+}}, #0
+ /// CHECK-NOT: cmp
+ /// CHECK-NOT: sbcs
+
+ public static long $noinline$LongNonmatCondCst_LongVarVar7(long a, long x, long y) {
+ return a < 0 ? x : y;
+ }
+
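What the checker lines above pin down, restated in C++: a 64-bit value equals zero iff the OR of its two 32-bit halves is zero, and its ordering against zero is just the sign of the high half, so no cmp/sbcs chain is needed. A hedged sketch:

    #include <cstdint>

    // 'orrs ip, lo, hi': a long is zero iff the OR of its halves is zero.
    bool LongIsZero(int64_t a) {
      uint32_t lo = static_cast<uint32_t>(a);
      uint32_t hi = static_cast<uint32_t>(static_cast<uint64_t>(a) >> 32);
      return (lo | hi) == 0;
    }

    // 'cmp hi, #0': a >= 0 (or a < 0) depends only on the high half's sign bit.
    bool LongIsNonNegative(int64_t a) {
      int32_t hi = static_cast<int32_t>(static_cast<uint64_t>(a) >> 32);
      return hi >= 0;
    }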
/// CHECK-START: long Main.LongMatCond_LongVarVar(long, long, long, long) register (after)
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
/// CHECK: <<Sel1:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
@@ -688,6 +728,37 @@
assertEqual(7L, $noinline$LongNonmatCondCst_LongVarVar3(2L, 5L, 7L));
+ long[] long_inputs = {
+ 0L, 1L, -1L, Long.MIN_VALUE, Long.MAX_VALUE, 2L, 0x100000000L, 0xFFFFFFFF00000000L, -9000L};
+
+ long[] expected_1 = {5L, 7L, 7L, 7L, 7L, 7L, 7L, 7L, 7L};
+
+ for (int i = 0; i < long_inputs.length; i++) {
+ assertEqual(expected_1[i], $noinline$LongNonmatCondCst_LongVarVar4(long_inputs[i], 5L, 7L));
+ }
+
+ long[] expected_2 = {7L, 5L, 5L, 5L, 5L, 5L, 5L, 5L, 5L};
+
+ for (int i = 0; i < long_inputs.length; i++) {
+ assertEqual(expected_2[i], $noinline$LongNonmatCondCst_LongVarVar5(long_inputs[i], 5L, 7L));
+ }
+
+ long[] expected_3 = {5L, 5L, 7L, 7L, 5L, 5L, 5L, 7L, 7L};
+
+ for (int i = 0; i < long_inputs.length; i++) {
+ assertEqual(expected_3[i], $noinline$LongNonmatCondCst_LongVarVar6(long_inputs[i], 5L, 7L));
+ }
+
+ long[] expected_4 = {7L, 7L, 5L, 5L, 7L, 7L, 7L, 5L, 5L};
+
+ for (int i = 0; i < long_inputs.length; i++) {
+ assertEqual(expected_4[i], $noinline$LongNonmatCondCst_LongVarVar7(long_inputs[i], 5L, 7L));
+ }
+
+ assertEqual(7L, $noinline$LongNonmatCondCst_LongVarVar7(0L, 5L, 7L));
+ assertEqual(7L, $noinline$LongNonmatCondCst_LongVarVar7(2L, 5L, 7L));
+ assertEqual(5L, $noinline$LongNonmatCondCst_LongVarVar7(-9000L, 5L, 7L));
+
assertEqual(5, FloatLtNonmatCond_IntVarVar(3, 2, 5, 7));
assertEqual(7, FloatLtNonmatCond_IntVarVar(2, 3, 5, 7));
assertEqual(7, FloatLtNonmatCond_IntVarVar(Float.NaN, 2, 5, 7));
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index a89fe5b..ca52a99 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -668,12 +668,6 @@
# Note: this is required as envsetup right now exports detect_leaks=0.
RUN_TEST_ASAN_OPTIONS=""
-# JVMTI has a mismatch of malloc with delete. b/38322765
-if [ "x$RUN_TEST_ASAN_OPTIONS" != "x" ] ; then
- RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}:"
-fi
-RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}alloc_dealloc_mismatch=0"
-
# Multiple shutdown leaks. b/38341789
if [ "x$RUN_TEST_ASAN_OPTIONS" != "x" ] ; then
RUN_TEST_ASAN_OPTIONS="${RUN_TEST_ASAN_OPTIONS}:"
diff --git a/tools/ahat/src/heapdump/AhatClassInstance.java b/tools/ahat/src/heapdump/AhatClassInstance.java
index 273530a..c10d604 100644
--- a/tools/ahat/src/heapdump/AhatClassInstance.java
+++ b/tools/ahat/src/heapdump/AhatClassInstance.java
@@ -154,10 +154,7 @@
}
@Override public AhatInstance getAssociatedBitmapInstance() {
- if (isInstanceOfClass("android.graphics.Bitmap")) {
- return this;
- }
- return null;
+ return getBitmapInfo() == null ? null : this;
}
@Override public boolean isClassInstance() {
@@ -178,14 +175,27 @@
* Returns null if the field value is null, is not a byte[], or could not be read.
*/
private byte[] getByteArrayField(String fieldName) {
- Value value = getField(fieldName);
- if (!value.isAhatInstance()) {
- return null;
- }
- return value.asAhatInstance().asByteArray();
+ AhatInstance field = getRefField(fieldName);
+ return field == null ? null : field.asByteArray();
}
- public BufferedImage asBitmap() {
+ private static class BitmapInfo {
+ public final int width;
+ public final int height;
+ public final byte[] buffer;
+
+ public BitmapInfo(int width, int height, byte[] buffer) {
+ this.width = width;
+ this.height = height;
+ this.buffer = buffer;
+ }
+ }
+
+ /**
+ * Return bitmap info for this object, or null if no appropriate bitmap
+ * info is available.
+ */
+ private BitmapInfo getBitmapInfo() {
if (!isInstanceOfClass("android.graphics.Bitmap")) {
return null;
}
@@ -205,20 +215,34 @@
return null;
}
+ if (buffer.length < 4 * height * width) {
+ return null;
+ }
+
+ return new BitmapInfo(width, height, buffer);
+ }
+
+ public BufferedImage asBitmap() {
+ BitmapInfo info = getBitmapInfo();
+ if (info == null) {
+ return null;
+ }
+
// Convert the raw data to an image
// Convert BGRA to ABGR
- int[] abgr = new int[height * width];
+ int[] abgr = new int[info.height * info.width];
for (int i = 0; i < abgr.length; i++) {
abgr[i] = (
- (((int) buffer[i * 4 + 3] & 0xFF) << 24)
- + (((int) buffer[i * 4 + 0] & 0xFF) << 16)
- + (((int) buffer[i * 4 + 1] & 0xFF) << 8)
- + ((int) buffer[i * 4 + 2] & 0xFF));
+ (((int) info.buffer[i * 4 + 3] & 0xFF) << 24)
+ + (((int) info.buffer[i * 4 + 0] & 0xFF) << 16)
+ + (((int) info.buffer[i * 4 + 1] & 0xFF) << 8)
+ + ((int) info.buffer[i * 4 + 2] & 0xFF));
}
BufferedImage bitmap = new BufferedImage(
- width, height, BufferedImage.TYPE_4BYTE_ABGR);
- bitmap.setRGB(0, 0, width, height, abgr, 0, width);
+ info.width, info.height, BufferedImage.TYPE_4BYTE_ABGR);
+ bitmap.setRGB(0, 0, info.width, info.height, abgr, 0, info.width);
return bitmap;
}
}
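For reference, a hedged C++ restatement of the pixel loop above: each pixel is four BGRA bytes in buffer, repacked into an int with alpha in the top byte, the ABGR order the Java code builds before handing it to setRGB:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Each pixel: 4 bytes B, G, R, A in 'buffer'; the output int packs
    // A|B|G|R from the top byte down, matching the loop in asBitmap().
    std::vector<int32_t> BgraToAbgr(const std::vector<uint8_t>& buffer,
                                    int width, int height) {
      std::vector<int32_t> abgr(static_cast<size_t>(width) * height);
      for (size_t i = 0; i < abgr.size(); i++) {
        abgr[i] = static_cast<int32_t>(
            (static_cast<uint32_t>(buffer[i * 4 + 3]) << 24) |  // A
            (static_cast<uint32_t>(buffer[i * 4 + 0]) << 16) |  // B
            (static_cast<uint32_t>(buffer[i * 4 + 1]) << 8) |   // G
             static_cast<uint32_t>(buffer[i * 4 + 2]));         // R
      }
      return abgr;
    }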
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
index 6c2c072..546a6bf 100755
--- a/tools/setup-buildbot-device.sh
+++ b/tools/setup-buildbot-device.sh
@@ -38,6 +38,11 @@
seconds_per_hour=3600
+# Kill logd first, so that it has been restarted by the time we set the adb
+# buffer size later in this file.
+echo -e "${green}Killing logd, seen leaking on fugu/N${nc}"
+adb shell killall -9 /system/bin/logd
+
# Update date on device if the difference with host is more than one hour.
if [ $abs_time_difference_in_seconds -gt $seconds_per_hour ]; then
echo -e "${green}Update date on device${nc}"
@@ -61,9 +66,6 @@
echo -e "${green}Battery info${nc}"
adb shell dumpsys battery
-echo -e "${green}Killing logd, seen leaking on fugu/N${nc}"
-adb shell killall -9 /system/bin/logd
-
echo -e "${green}Setting adb buffer size to 32MB${nc}"
adb logcat -G 32M
adb logcat -g