 compiler/optimizing/code_generator_arm64.cc                |  2
 compiler/optimizing/common_arm64.h                         |  3
 runtime/arch/arm64/instruction_set_features_arm64.h        | 11
 runtime/arch/arm64/instruction_set_features_arm64_test.cc  |  2
 4 files changed, 9 insertions, 9 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 33eacbaf08..0fa4fa4256 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1380,7 +1380,7 @@ void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
   locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
   if (instruction->HasUses()) {
     locations->SetOut(Location::SameAsFirstInput());
   }
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 966165bf4c..53f1f3c45c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -194,7 +194,8 @@ static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {

   int64_t value = CodeGenerator::GetInt64ValueOf(constant);

-  if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() || instr->IsCompare()) {
+  if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
+      instr->IsCompare() || instr->IsBoundsCheck()) {
     // Uses aliases of ADD/SUB instructions.
     return vixl::Assembler::IsImmAddSub(value);
   } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 3b3e2c95fe..e59ff58954 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -66,13 +66,12 @@ class Arm64InstructionSetFeatures FINAL : public InstructionSetFeatures {
     return fix_cortex_a53_843419_;
   }

-  // TODO: Tune this on a per CPU basis. For now, we pessimistically assume
-  // that all ARM64 CPUs prefer explicit memory barriers over acquire-release.
-  //
-  // NOTE: This should not be the case! However we want to exercise the
-  // explicit memory barriers code paths in the Optimizing Compiler.
+  // NOTE: This flag can be tuned on a per-CPU basis. In general all ARMv8 CPUs
+  // should prefer the Acquire-Release semantics over the explicit DMBs when
+  // handling load/store-volatile. For a specific use case see the ARM64
+  // Optimizing backend.
   bool PreferAcquireRelease() const {
-    return false;
+    return true;
   }

   virtual ~Arm64InstructionSetFeatures() {}
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index 753107baa8..599f24ed30 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -31,7 +31,7 @@ TEST(Arm64InstructionSetFeaturesTest, Arm64Features) {
   EXPECT_STREQ("smp,a53", arm64_features->GetFeatureString().c_str());
   EXPECT_EQ(arm64_features->AsBitmap(), 3U);
   // See the comments in instruction_set_features_arm64.h.
-  EXPECT_FALSE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
+  EXPECT_TRUE(arm64_features->AsArm64InstructionSetFeatures()->PreferAcquireRelease());
 }

 }  // namespace art
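
The code_generator_arm64.cc hunk lets the second BoundsCheck input (the length) live in a constant location rather than always forcing a register. The helper named in the diff, ARM64EncodableConstantOrRegister, is not shown above; the sketch below is an assumption of what such a helper amounts to, inferred from the CanEncodeConstantAsImmediate hunk in common_arm64.h, and is not the verbatim ART implementation.

```cpp
// Hypothetical sketch (not verbatim ART code): pick a constant location when
// the operand is a constant that encodes as an immediate for `instr`,
// otherwise fall back to requiring a register, as VisitBoundsCheck now does.
static Location ARM64EncodableConstantOrRegister(HInstruction* constant,
                                                 HInstruction* instr) {
  if (constant->IsConstant() &&
      CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
    return Location::ConstantLocation(constant->AsConstant());
  }
  return Location::RequiresRegister();
}
```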
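
The common_arm64.h hunk routes HBoundsCheck through the ADD/SUB-immediate check because the bounds check is lowered to a compare, and CMP is an alias of SUBS. As a rough, self-contained restatement of the rule vixl::Assembler::IsImmAddSub() is assumed to implement (an unsigned 12-bit value, optionally left-shifted by 12 bits):

```cpp
#include <cstdint>

// Minimal sketch of the AArch64 ADD/SUB immediate encoding rule: the value
// must fit in an unsigned 12-bit field, either as-is or shifted left by 12.
static bool IsEncodableAddSubImmediate(int64_t value) {
  uint64_t v = static_cast<uint64_t>(value);
  bool fits_imm12 = (v >> 12) == 0;                                   // 0..4095
  bool fits_imm12_shifted = ((v & 0xfffu) == 0) && ((v >> 24) == 0);  // (0..4095) << 12
  return fits_imm12 || fits_imm12_shifted;
}
```

For example, a constant length of 4096 (1 << 12) encodes directly into the compare, while 4097 fits neither form and still needs a register.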
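
The instruction_set_features_arm64.h change flips the default so volatile accesses prefer the ARMv8 acquire-release instructions (LDAR/STLR) over plain accesses fenced with explicit DMB barriers, and the test is updated to match. The snippet below only illustrates the two lowering styles using C++11 atomics, not ART's code generator; on AArch64 an acquire load is typically compiled to LDAR, while the relaxed-load-plus-fence form corresponds to LDR followed by DMB ISHLD.

```cpp
#include <atomic>
#include <cstdint>

// Acquire-release style: a single LDAR carries the ordering.
int32_t LoadAcquireStyle(const std::atomic<int32_t>& field) {
  return field.load(std::memory_order_acquire);  // typically ldar on ARMv8
}

// Explicit-barrier style: a plain load followed by a separate barrier.
int32_t LoadWithExplicitBarrier(const std::atomic<int32_t>& field) {
  int32_t value = field.load(std::memory_order_relaxed);  // plain ldr
  std::atomic_thread_fence(std::memory_order_acquire);    // dmb ishld
  return value;
}
```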