path: root/compiler
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.bp                                   |   4
-rw-r--r--  compiler/dex/quick_compiler_callbacks.h               |  15
-rw-r--r--  compiler/dex/verified_method.cc                       |   4
-rw-r--r--  compiler/oat_writer.cc                                |  15
-rw-r--r--  compiler/optimizing/code_generator.cc                 |  60
-rw-r--r--  compiler/optimizing/code_generator.h                  |   4
-rw-r--r--  compiler/optimizing/code_generator_arm.cc             |   2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc           |   2
-rw-r--r--  compiler/optimizing/code_generator_mips.cc            | 187
-rw-r--r--  compiler/optimizing/code_generator_mips.h             |   2
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc          |   2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc             | 222
-rw-r--r--  compiler/optimizing/code_generator_x86.h              |  11
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc          | 246
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h           |  12
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc         |   8
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc    |  31
-rw-r--r--  compiler/optimizing/induction_var_range.cc            |  30
-rw-r--r--  compiler/optimizing/instruction_simplifier_shared.cc  |   3
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc              |   6
-rw-r--r--  compiler/optimizing/nodes.h                           |  83
-rw-r--r--  compiler/optimizing/nodes_shared.h                    |  12
-rw-r--r--  compiler/optimizing/sharpening.cc                     |  38
-rw-r--r--  compiler/utils/mips/assembler_mips.h                  |  83
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc            |  79
25 files changed, 562 insertions(+), 599 deletions(-)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 4af43ccdc1..8a2c94a90a 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -293,7 +293,7 @@ art_cc_library {
art_cc_test {
name: "art_compiler_tests",
defaults: [
- "art_test_defaults",
+ "art_gtest_defaults",
],
srcs: [
"compiled_method_test.cc",
@@ -392,7 +392,7 @@ art_cc_test {
name: "art_compiler_host_tests",
device_supported: false,
defaults: [
- "art_test_defaults",
+ "art_gtest_defaults",
],
codegen: {
arm: {
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 1f696863b6..824194c7bd 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -29,8 +29,10 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
QuickCompilerCallbacks(VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map,
CompilerCallbacks::CallbackMode mode)
- : CompilerCallbacks(mode), verification_results_(verification_results),
- method_inliner_map_(method_inliner_map) {
+ : CompilerCallbacks(mode),
+ verification_results_(verification_results),
+ method_inliner_map_(method_inliner_map),
+ verifier_deps_(nullptr) {
CHECK(verification_results != nullptr);
CHECK(method_inliner_map != nullptr);
}
@@ -47,9 +49,18 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
return true;
}
+ verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE {
+ return verifier_deps_;
+ }
+
+ void SetVerifierDeps(verifier::VerifierDeps* deps) {
+ verifier_deps_ = deps;
+ }
+
private:
VerificationResults* const verification_results_;
DexFileToMethodInlinerMap* const method_inliner_map_;
+ verifier::VerifierDeps* verifier_deps_;
};
} // namespace art
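Note: the new accessors only expose a VerifierDeps pointer owned elsewhere. A minimal, hypothetical wiring sketch (the driver-side call site is not part of this change):

void AttachVerifierDeps(QuickCompilerCallbacks* callbacks, verifier::VerifierDeps* deps) {
  // The callbacks do not take ownership; `deps` must outlive verification.
  callbacks->SetVerifierDeps(deps);
  DCHECK_EQ(callbacks->GetVerifierDeps(), deps);
}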
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 4bcd59ac90..e19fb7b300 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -231,7 +231,7 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi
inst->VRegA_21c()));
const verifier::RegType& cast_type =
method_verifier->ResolveCheckedClass(inst->VRegB_21c());
- is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type);
+ is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type, method_verifier);
} else {
const verifier::RegType& array_type(line->GetRegisterType(method_verifier,
inst->VRegB_23x()));
@@ -243,7 +243,7 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi
inst->VRegA_23x()));
const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
->GetComponentType(array_type, method_verifier->GetClassLoader());
- is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
+ is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type, method_verifier);
}
}
if (is_safe_cast) {
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5e0c64ba8b..d629c0c887 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -259,7 +259,16 @@ class OatWriter::OatDexFile {
// Data to write to a separate section.
dchecked_vector<uint32_t> class_offsets_;
+ void InitTypeLookupTable(const DexFile& dex_file, uint8_t* storage) const {
+ lookup_table_.reset(TypeLookupTable::Create(dex_file, storage));
+ }
+
+ TypeLookupTable* GetTypeLookupTable() const {
+ return lookup_table_.get();
+ }
+
private:
+ mutable std::unique_ptr<TypeLookupTable> lookup_table_;
size_t GetClassOffsetsRawSize() const {
return class_offsets_.size() * sizeof(class_offsets_[0]);
}
@@ -2285,9 +2294,9 @@ bool OatWriter::WriteTypeLookupTables(
}
// Create the lookup table. When `nullptr` is given as the storage buffer,
- // TypeLookupTable allocates its own and DexFile takes ownership.
- opened_dex_files[i]->CreateTypeLookupTable(/* storage */ nullptr);
- TypeLookupTable* table = opened_dex_files[i]->GetTypeLookupTable();
+ // TypeLookupTable allocates its own and OatDexFile takes ownership.
+ oat_dex_file->InitTypeLookupTable(*opened_dex_files[i], /* storage */ nullptr);
+ TypeLookupTable* table = oat_dex_file->GetTypeLookupTable();
// Type tables are required to be 4 byte aligned.
size_t initial_offset = oat_size_;
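Note: OatDexFile caches the table through a `mutable std::unique_ptr`, so a const OatDexFile can still create and hand out its lookup table. A standalone sketch of that idiom, with illustrative names rather than ART's:

#include <memory>

struct Table {};

class Holder {
 public:
  void Init() const { table_.reset(new Table()); }  // may be called on a const object
  Table* Get() const { return table_.get(); }
 private:
  mutable std::unique_ptr<Table> table_;  // same idiom as OatDexFile::lookup_table_ above
};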
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 6732670ffc..51ba187c1b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1179,37 +1179,51 @@ void CodeGenerator::EmitParallelMoves(Location from1,
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
-void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path) {
+void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
// Ensure that the call kind indication given to the register allocator is
- // coherent with the runtime call generated, and that the GC side effect is
- // set when required.
+ // coherent with the runtime call generated.
if (slow_path == nullptr) {
DCHECK(instruction->GetLocations()->WillCall())
<< "instruction->DebugName()=" << instruction->DebugName();
- DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
- << "instruction->DebugName()=" << instruction->DebugName()
- << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString();
} else {
DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
<< "instruction->DebugName()=" << instruction->DebugName()
<< " slow_path->GetDescription()=" << slow_path->GetDescription();
- DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
- // When (non-Baker) read barriers are enabled, some instructions
- // use a slow path to emit a read barrier, which does not trigger
- // GC.
- (kEmitCompilerReadBarrier &&
- !kUseBakerReadBarrier &&
- (instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsArrayGet() ||
- instruction->IsLoadClass() ||
- instruction->IsLoadString() ||
- instruction->IsInstanceOf() ||
- instruction->IsCheckCast() ||
- (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
- << "instruction->DebugName()=" << instruction->DebugName()
- << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString()
- << " slow_path->GetDescription()=" << slow_path->GetDescription();
+ }
+
+ // Check that the GC side effect is set when required.
+ // TODO: Reverse EntrypointCanTriggerGC
+ if (EntrypointCanTriggerGC(entrypoint)) {
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " instruction->GetSideEffects().ToString()="
+ << instruction->GetSideEffects().ToString();
+ } else {
+ DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
+ // When (non-Baker) read barriers are enabled, some instructions
+ // use a slow path to emit a read barrier, which does not trigger
+ // GC.
+ (kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier &&
+ (instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsArrayGet() ||
+ instruction->IsLoadClass() ||
+ instruction->IsLoadString() ||
+ instruction->IsInstanceOf() ||
+ instruction->IsCheckCast() ||
+ (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " instruction->GetSideEffects().ToString()="
+ << instruction->GetSideEffects().ToString()
+ << " slow_path->GetDescription()=" << slow_path->GetDescription();
+ }
+ } else {
+ // The GC side effect is not required for the instruction. But the instruction might still have
+ // it, for example if it calls other entrypoints requiring it.
}
// Check the coherency of leaf information.
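Note: callers now forward the entrypoint enum, so the GC-side-effect DCHECK is applied only when the entrypoint itself can trigger GC. A hedged call-site sketch, not taken from this patch; the instruction comes from the surrounding visitor:

  // Illustrative: the entrypoint is forwarded so ValidateInvokeRuntime can key on it.
  codegen_->InvokeRuntime(kQuickThrowDivZero, instruction, instruction->GetDexPc());
  CheckEntrypointTypes<kQuickThrowDivZero, void, void>();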
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b4d4b9b760..22b5c9cff4 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -404,7 +404,9 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// Perfoms checks pertaining to an InvokeRuntime call.
- void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path);
+ void ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
// Perfoms checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call.
static void ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 40c2b9c1ec..8b2d6fd838 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1177,7 +1177,7 @@ void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value());
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 599185acd3..7f542da047 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1452,7 +1452,7 @@ void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value());
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index b767aa5ef2..b2e75952a0 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -905,7 +905,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
} else {
DCHECK(destination.IsStackSlot())
<< "Cannot move " << c->DebugName() << " to " << destination;
- __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
}
} else if (c->IsLongConstant()) {
// Move 64 bit constant.
@@ -917,7 +917,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
} else {
DCHECK(destination.IsDoubleStackSlot())
<< "Cannot move " << c->DebugName() << " to " << destination;
- __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
}
} else if (c->IsFloatConstant()) {
// Move 32 bit float constant.
@@ -927,7 +927,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
} else {
DCHECK(destination.IsStackSlot())
<< "Cannot move " << c->DebugName() << " to " << destination;
- __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
}
} else {
// Move 64 bit double constant.
@@ -939,7 +939,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
} else {
DCHECK(destination.IsDoubleStackSlot())
<< "Cannot move " << c->DebugName() << " to " << destination;
- __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
}
}
}
@@ -1224,7 +1224,7 @@ void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
bool reordering = __ SetReorder(false);
__ LoadFromOffset(kLoadWord, T9, TR, GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value());
__ Jalr(T9);
@@ -1960,6 +1960,25 @@ void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
+Location LocationsBuilderMIPS::RegisterOrZeroConstant(HInstruction* instruction) {
+ return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern())
+ ? Location::ConstantLocation(instruction->AsConstant())
+ : Location::RequiresRegister();
+}
+
+Location LocationsBuilderMIPS::FpuRegisterOrConstantForStore(HInstruction* instruction) {
+ // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register.
+ // We can store a non-zero float or double constant without first loading it into the FPU,
+ // but we should only prefer this if the constant has a single use.
+ if (instruction->IsConstant() &&
+ (instruction->AsConstant()->IsZeroBitPattern() ||
+ instruction->GetUses().HasExactlyOneElement())) {
+ return Location::ConstantLocation(instruction->AsConstant());
+ }
+ // Otherwise fall through and require an FPU register for the constant.
+ return Location::RequiresFpuRegister();
+}
+
void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
bool needs_runtime_call = instruction->NeedsTypeCheck();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
@@ -1974,9 +1993,9 @@ void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
- locations->SetInAt(2, Location::RequiresFpuRegister());
+ locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
} else {
- locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
}
}
}
@@ -1985,24 +2004,29 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
+ Location value_location = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
bool needs_runtime_call = locations->WillCall();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
auto null_checker = GetImplicitNullChecker(instruction);
+ Register base_reg = index.IsConstant() ? obj : TMP;
switch (value_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register value = locations->InAt(2).AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- __ StoreToOffset(kStoreByte, value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1;
} else {
- __ Addu(TMP, obj, index.AsRegister<Register>());
- __ StoreToOffset(kStoreByte, value, TMP, data_offset, null_checker);
+ __ Addu(base_reg, obj, index.AsRegister<Register>());
+ }
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ Register value = value_location.AsRegister<Register>();
+ __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker);
}
break;
}
@@ -2010,15 +2034,18 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register value = locations->InAt(2).AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ StoreToOffset(kStoreHalfword, value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
- __ Addu(TMP, obj, TMP);
- __ StoreToOffset(kStoreHalfword, value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(base_reg, obj, base_reg);
+ }
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ Register value = value_location.AsRegister<Register>();
+ __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker);
}
break;
}
@@ -2027,20 +2054,23 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimNot: {
if (!needs_runtime_call) {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register value = locations->InAt(2).AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreToOffset(kStoreWord, value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- DCHECK(index.IsRegister()) << index;
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
- __ Addu(TMP, obj, TMP);
- __ StoreToOffset(kStoreWord, value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(base_reg, obj, base_reg);
}
- if (needs_write_barrier) {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- codegen_->MarkGCCard(obj, value);
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
+ DCHECK(!needs_write_barrier);
+ } else {
+ Register value = value_location.AsRegister<Register>();
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ if (needs_write_barrier) {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->MarkGCCard(obj, value);
+ }
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
@@ -2052,47 +2082,54 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- Register value = locations->InAt(2).AsRegisterPairLow<Register>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreToOffset(kStoreDoubleword, value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
- __ Addu(TMP, obj, TMP);
- __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(base_reg, obj, base_reg);
+ }
+ if (value_location.IsConstant()) {
+ int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ Register value = value_location.AsRegisterPairLow<Register>();
+ __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
}
break;
}
case Primitive::kPrimFloat: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
- DCHECK(locations->InAt(2).IsFpuRegister());
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreSToOffset(value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
- __ Addu(TMP, obj, TMP);
- __ StoreSToOffset(value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(base_reg, obj, base_reg);
+ }
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ FRegister value = value_location.AsFpuRegister<FRegister>();
+ __ StoreSToOffset(value, base_reg, data_offset, null_checker);
}
break;
}
case Primitive::kPrimDouble: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
- DCHECK(locations->InAt(2).IsFpuRegister());
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreDToOffset(value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
- __ Addu(TMP, obj, TMP);
- __ StoreDToOffset(value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(base_reg, obj, base_reg);
+ }
+ if (value_location.IsConstant()) {
+ int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ FRegister value = value_location.AsFpuRegister<FRegister>();
+ __ StoreDToOffset(value, base_reg, data_offset, null_checker);
}
break;
}
@@ -3888,9 +3925,9 @@ void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const Field
}
} else {
if (Primitive::IsFloatingPointType(field_type)) {
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
} else {
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1)));
}
}
}
@@ -3901,6 +3938,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
Primitive::Type type = field_info.GetFieldType();
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
+ Location value_location = locations->InAt(1);
StoreOperandType store_type = kStoreByte;
bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
@@ -3941,24 +3979,24 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
if (type == Primitive::kPrimDouble) {
// Pass FP parameters in core registers.
- Location in = locations->InAt(1);
- if (in.IsFpuRegister()) {
- __ Mfc1(locations->GetTemp(1).AsRegister<Register>(), in.AsFpuRegister<FRegister>());
+ if (value_location.IsFpuRegister()) {
+ __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
+ value_location.AsFpuRegister<FRegister>());
__ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
- in.AsFpuRegister<FRegister>());
- } else if (in.IsDoubleStackSlot()) {
+ value_location.AsFpuRegister<FRegister>());
+ } else if (value_location.IsDoubleStackSlot()) {
__ LoadFromOffset(kLoadWord,
locations->GetTemp(1).AsRegister<Register>(),
SP,
- in.GetStackIndex());
+ value_location.GetStackIndex());
__ LoadFromOffset(kLoadWord,
locations->GetTemp(2).AsRegister<Register>(),
SP,
- in.GetStackIndex() + 4);
+ value_location.GetStackIndex() + 4);
} else {
- DCHECK(in.IsConstant());
- DCHECK(in.GetConstant()->IsDoubleConstant());
- int64_t value = bit_cast<int64_t, double>(in.GetConstant()->AsDoubleConstant()->GetValue());
+ DCHECK(value_location.IsConstant());
+ DCHECK(value_location.GetConstant()->IsDoubleConstant());
+ int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
__ LoadConst64(locations->GetTemp(2).AsRegister<Register>(),
locations->GetTemp(1).AsRegister<Register>(),
value);
@@ -3967,19 +4005,19 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
codegen_->InvokeRuntime(kQuickA64Store, instruction, dex_pc);
CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>();
} else {
- if (!Primitive::IsFloatingPointType(type)) {
+ if (value_location.IsConstant()) {
+ int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker);
+ } else if (!Primitive::IsFloatingPointType(type)) {
Register src;
if (type == Primitive::kPrimLong) {
- DCHECK(locations->InAt(1).IsRegisterPair());
- src = locations->InAt(1).AsRegisterPairLow<Register>();
+ src = value_location.AsRegisterPairLow<Register>();
} else {
- DCHECK(locations->InAt(1).IsRegister());
- src = locations->InAt(1).AsRegister<Register>();
+ src = value_location.AsRegister<Register>();
}
__ StoreToOffset(store_type, src, obj, offset, null_checker);
} else {
- DCHECK(locations->InAt(1).IsFpuRegister());
- FRegister src = locations->InAt(1).AsFpuRegister<FRegister>();
+ FRegister src = value_location.AsFpuRegister<FRegister>();
if (type == Primitive::kPrimFloat) {
__ StoreSToOffset(src, obj, offset, null_checker);
} else {
@@ -3990,8 +4028,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
// TODO: memory barriers?
if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
- DCHECK(locations->InAt(1).IsRegister());
- Register src = locations->InAt(1).AsRegister<Register>();
+ Register src = value_location.AsRegister<Register>();
codegen_->MarkGCCard(obj, src);
}
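Note: the payoff of allowing constant locations here is that the assembler can store many constants without first materializing them in the value register. A rough behavioral sketch of the word case (an assumption about what StoreConstToOffset boils down to, not its actual implementation):

void StoreConstWordSketch(MipsAssembler* assembler, int32_t value, Register base, int32_t offset) {
  if (value == 0) {
    assembler->Sw(ZERO, base, offset);    // zero is stored straight from the ZERO register
  } else {
    assembler->LoadConst32(TMP, value);   // otherwise build the constant in the scratch register
    assembler->Sw(TMP, base, offset);
  }
}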
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index a42374f146..553a7e6674 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -191,6 +191,8 @@ class LocationsBuilderMIPS : public HGraphVisitor {
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ Location RegisterOrZeroConstant(HInstruction* instruction);
+ Location FpuRegisterOrConstantForStore(HInstruction* instruction);
InvokeDexCallingConventionVisitorMIPS parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 4d87523206..557e5da916 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -946,7 +946,7 @@ void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
// TODO: anything related to T9/GP/GOT/PIC/.so's?
__ LoadFromOffset(kLoadDoubleword,
T9,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 28db29cb58..172ce4ab12 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -754,7 +754,7 @@ void CodeGeneratorX86::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(entrypoint).Int32Value());
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
@@ -1069,15 +1069,11 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
__ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
} else if (source.IsConstant()) {
HConstant* constant = source.GetConstant();
- int64_t value;
- if (constant->IsLongConstant()) {
- value = constant->AsLongConstant()->GetValue();
- } else {
- DCHECK(constant->IsDoubleConstant());
- value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
- }
+ DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant());
+ int64_t value = GetInt64ValueOf(constant);
__ movl(Address(ESP, destination.GetStackIndex()), Immediate(Low32Bits(value)));
- __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+ __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
+ Immediate(High32Bits(value)));
} else {
DCHECK(source.IsDoubleStackSlot()) << source;
EmitParallelMoves(
@@ -1427,14 +1423,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
Location lhs = condition->GetLocations()->InAt(0);
Location rhs = condition->GetLocations()->InAt(1);
// LHS is guaranteed to be in a register (see LocationsBuilderX86::HandleCondition).
- if (rhs.IsRegister()) {
- __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
- } else if (rhs.IsConstant()) {
- int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- codegen_->Compare32BitValue(lhs.AsRegister<Register>(), constant);
- } else {
- __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
- }
+ codegen_->GenerateIntCompare(lhs, rhs);
if (true_target == nullptr) {
__ j(X86Condition(condition->GetOppositeCondition()), false_target);
} else {
@@ -1528,18 +1517,6 @@ void LocationsBuilderX86::VisitSelect(HSelect* select) {
locations->SetOut(Location::SameAsFirstInput());
}
-void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) {
- Register lhs_reg = lhs.AsRegister<Register>();
- if (rhs.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- Compare32BitValue(lhs_reg, value);
- } else if (rhs.IsStackSlot()) {
- assembler_.cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex()));
- } else {
- assembler_.cmpl(lhs_reg, rhs.AsRegister<Register>());
- }
-}
-
void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) {
LocationSummary* locations = select->GetLocations();
DCHECK(locations->InAt(0).Equals(locations->Out()));
@@ -3621,7 +3598,7 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction)
} else {
DCHECK(value.IsConstant()) << value;
if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ __ jmp(slow_path->GetEntryLabel());
}
}
break;
@@ -5033,56 +5010,31 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
switch (type) {
case Primitive::kPrimBoolean: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movzxb(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
- } else {
- __ movzxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
- }
+ __ movzxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
}
case Primitive::kPrimByte: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movsxb(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
- } else {
- __ movsxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
- }
+ __ movsxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
}
case Primitive::kPrimShort: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movsxw(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
- } else {
- __ movsxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
- }
+ __ movsxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
break;
}
case Primitive::kPrimChar: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movzxw(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
- } else {
- __ movzxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
- }
+ __ movzxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
break;
}
case Primitive::kPrimInt: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movl(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
- } else {
- __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
- }
+ __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
break;
}
@@ -5099,21 +5051,16 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
} else {
Register out = out_loc.AsRegister<Register>();
+ __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
if (index.IsConstant()) {
uint32_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ movl(out, Address(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- // If read barriers are enabled, emit read barriers other than
- // Baker's using a slow path (and also unpoison the loaded
- // reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
} else {
- __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- // If read barriers are enabled, emit read barriers other than
- // Baker's using a slow path (and also unpoison the loaded
- // reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(
instruction, out_loc, out_loc, obj_loc, data_offset, index);
}
@@ -5123,40 +5070,23 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimLong: {
DCHECK_NE(obj, out_loc.AsRegisterPairLow<Register>());
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ movl(out_loc.AsRegisterPairLow<Register>(), Address(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(out_loc.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
- } else {
- __ movl(out_loc.AsRegisterPairLow<Register>(),
- Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(out_loc.AsRegisterPairHigh<Register>(),
- Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize));
- }
+ __ movl(out_loc.AsRegisterPairLow<Register>(),
+ CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ movl(out_loc.AsRegisterPairHigh<Register>(),
+ CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset + kX86WordSize));
break;
}
case Primitive::kPrimFloat: {
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
- if (index.IsConstant()) {
- __ movss(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
- } else {
- __ movss(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
- }
+ __ movss(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
break;
}
case Primitive::kPrimDouble: {
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
- if (index.IsConstant()) {
- __ movsd(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
- } else {
- __ movsd(out, Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
- }
+ __ movsd(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset));
break;
}
@@ -5227,9 +5157,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_1, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_1, offset);
if (value.IsRegister()) {
__ movb(address, value.AsRegister<ByteRegister>());
} else {
@@ -5242,9 +5170,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_2, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_2, offset);
if (value.IsRegister()) {
__ movw(address, value.AsRegister<Register>());
} else {
@@ -5256,9 +5182,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimNot: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset);
if (!value.IsRegister()) {
// Just setting null.
@@ -5354,9 +5278,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimInt: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset);
if (value.IsRegister()) {
__ movl(address, value.AsRegister<Register>());
} else {
@@ -5370,44 +5292,27 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- if (value.IsRegisterPair()) {
- __ movl(Address(array, offset), value.AsRegisterPairLow<Register>());
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(array, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
- } else {
- DCHECK(value.IsConstant());
- int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
- __ movl(Address(array, offset), Immediate(Low32Bits(val)));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(array, offset + kX86WordSize), Immediate(High32Bits(val)));
- }
+ if (value.IsRegisterPair()) {
+ __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset),
+ value.AsRegisterPairLow<Register>());
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset + kX86WordSize),
+ value.AsRegisterPairHigh<Register>());
} else {
- if (value.IsRegisterPair()) {
- __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
- value.AsRegisterPairLow<Register>());
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
- value.AsRegisterPairHigh<Register>());
- } else {
- DCHECK(value.IsConstant());
- int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
- __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
- Immediate(Low32Bits(val)));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
- Immediate(High32Bits(val)));
- }
+ DCHECK(value.IsConstant());
+ int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset),
+ Immediate(Low32Bits(val)));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset + kX86WordSize),
+ Immediate(High32Bits(val)));
}
break;
}
case Primitive::kPrimFloat: {
uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset);
if (value.IsFpuRegister()) {
__ movss(address, value.AsFpuRegister<XmmRegister>());
} else {
@@ -5421,17 +5326,13 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimDouble: {
uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_8, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, offset);
if (value.IsFpuRegister()) {
__ movsd(address, value.AsFpuRegister<XmmRegister>());
} else {
DCHECK(value.IsConstant());
- Address address_hi = index.IsConstant() ?
- Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) +
- offset + kX86WordSize) :
- Address(array, index.AsRegister<Register>(), TIMES_8, offset + kX86WordSize);
+ Address address_hi =
+ CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, offset + kX86WordSize);
int64_t v = bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue());
__ movl(address, Immediate(Low32Bits(v)));
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -5525,13 +5426,7 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
}
codegen_->MaybeRecordImplicitNullCheck(array_length);
} else {
- Register length = length_loc.AsRegister<Register>();
- if (index_loc.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
- __ cmpl(length, Immediate(value));
- } else {
- __ cmpl(length, index_loc.AsRegister<Register>());
- }
+ codegen_->GenerateIntCompare(length_loc, index_loc);
}
codegen_->AddSlowPath(slow_path);
__ j(kBelowEqual, slow_path->GetEntryLabel());
@@ -6909,9 +6804,7 @@ void CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instr
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ ref =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- Address src = index.IsConstant() ?
- Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) :
- Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset);
+ Address src = CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset);
GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, needs_null_check);
}
@@ -7392,6 +7285,27 @@ void CodeGeneratorX86::Compare32BitValue(Register dest, int32_t value) {
}
}
+void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) {
+ Register lhs_reg = lhs.AsRegister<Register>();
+ if (rhs.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
+ Compare32BitValue(lhs_reg, value);
+ } else if (rhs.IsStackSlot()) {
+ __ cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex()));
+ } else {
+ __ cmpl(lhs_reg, rhs.AsRegister<Register>());
+ }
+}
+
+Address CodeGeneratorX86::ArrayAddress(Register obj,
+ Location index,
+ ScaleFactor scale,
+ uint32_t data_offset) {
+ return index.IsConstant() ?
+ Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) :
+ Address(obj, index.AsRegister<Register>(), scale, data_offset);
+}
+
Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
Register reg,
Register value) {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 04a0a3de6a..5866e65d88 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -427,8 +427,6 @@ class CodeGeneratorX86 : public CodeGenerator {
Register value,
bool value_can_be_null);
- void GenerateIntCompare(Location lhs, Location rhs);
-
void GenerateMemoryBarrier(MemBarrierKind kind);
Label* GetLabelOf(HBasicBlock* block) const {
@@ -474,6 +472,15 @@ class CodeGeneratorX86 : public CodeGenerator {
// Compare a register with a 32-bit value in the most efficient manner.
void Compare32BitValue(Register dest, int32_t value);
+ // Compare int values. Supports only register locations for `lhs`.
+ void GenerateIntCompare(Location lhs, Location rhs);
+
+ // Construct address for array access.
+ static Address ArrayAddress(Register obj,
+ Location index,
+ ScaleFactor scale,
+ uint32_t data_offset);
+
Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
void Finalize(CodeAllocator* allocator) OVERRIDE;
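Note: both helpers fold existing patterns into one place; GenerateIntCompare picks cmp-with-immediate, cmp-with-stack-slot, or cmp-with-register from the rhs location, and ArrayAddress folds the constant-vs-register index choice. A standalone check (illustrative constants, assumed int[] data offset on a 32-bit target) of the address arithmetic ArrayAddress relies on, namely that (index << scale) + data_offset matches base-plus-scaled-index addressing:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t data_offset = 12;  // assumed mirror::Array data offset for int32_t elements
  const int32_t index = 5;
  const int scale = 2;              // TIMES_4
  assert(((index << scale) + data_offset) == (data_offset + index * 4u));
  return 0;
}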
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 88d98fc1e1..1943ddc6f7 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -981,7 +981,7 @@ void CodeGeneratorX86_64::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kX86_64PointerSize>(entrypoint).Int32Value());
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
@@ -1204,13 +1204,8 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) {
source.AsFpuRegister<XmmRegister>());
} else if (source.IsConstant()) {
HConstant* constant = source.GetConstant();
- int64_t value;
- if (constant->IsDoubleConstant()) {
- value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
- } else {
- DCHECK(constant->IsLongConstant());
- value = constant->AsLongConstant()->GetValue();
- }
+ DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant());
+ int64_t value = GetInt64ValueOf(constant);
Store64BitValueToStack(destination, value);
} else {
DCHECK(source.IsDoubleStackSlot());
@@ -1309,31 +1304,11 @@ void InstructionCodeGeneratorX86_64::GenerateCompareTest(HCondition* condition)
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- CpuRegister left_reg = left.AsRegister<CpuRegister>();
- if (right.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(right.GetConstant());
- if (value == 0) {
- __ testl(left_reg, left_reg);
- } else {
- __ cmpl(left_reg, Immediate(value));
- }
- } else if (right.IsStackSlot()) {
- __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
- } else {
- __ cmpl(left_reg, right.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateIntCompare(left, right);
break;
}
case Primitive::kPrimLong: {
- CpuRegister left_reg = left.AsRegister<CpuRegister>();
- if (right.IsConstant()) {
- int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
- codegen_->Compare64BitValue(left_reg, value);
- } else if (right.IsDoubleStackSlot()) {
- __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
- } else {
- __ cmpq(left_reg, right.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateLongCompare(left, right);
break;
}
case Primitive::kPrimFloat: {
@@ -1488,15 +1463,7 @@ void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruc
Location lhs = condition->GetLocations()->InAt(0);
Location rhs = condition->GetLocations()->InAt(1);
- if (rhs.IsRegister()) {
- __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
- } else if (rhs.IsConstant()) {
- int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant);
- } else {
- __ cmpl(lhs.AsRegister<CpuRegister>(),
- Address(CpuRegister(RSP), rhs.GetStackIndex()));
- }
+ codegen_->GenerateIntCompare(lhs, rhs);
if (true_target == nullptr) {
__ j(X86_64IntegerCondition(condition->GetOppositeCondition()), false_target);
} else {
@@ -1696,28 +1663,14 @@ void InstructionCodeGeneratorX86_64::HandleCondition(HCondition* cond) {
// Clear output register: setcc only sets the low byte.
__ xorl(reg, reg);
- if (rhs.IsRegister()) {
- __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
- } else if (rhs.IsConstant()) {
- int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant);
- } else {
- __ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
- }
+ codegen_->GenerateIntCompare(lhs, rhs);
__ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
return;
case Primitive::kPrimLong:
// Clear output register: setcc only sets the low byte.
__ xorl(reg, reg);
- if (rhs.IsRegister()) {
- __ cmpq(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
- } else if (rhs.IsConstant()) {
- int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue();
- codegen_->Compare64BitValue(lhs.AsRegister<CpuRegister>(), value);
- } else {
- __ cmpq(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
- }
+ codegen_->GenerateLongCompare(lhs, rhs);
__ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
return;
case Primitive::kPrimFloat: {
@@ -1885,27 +1838,11 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) {
case Primitive::kPrimShort:
case Primitive::kPrimChar:
case Primitive::kPrimInt: {
- CpuRegister left_reg = left.AsRegister<CpuRegister>();
- if (right.IsConstant()) {
- int32_t value = right.GetConstant()->AsIntConstant()->GetValue();
- codegen_->Compare32BitValue(left_reg, value);
- } else if (right.IsStackSlot()) {
- __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
- } else {
- __ cmpl(left_reg, right.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateIntCompare(left, right);
break;
}
case Primitive::kPrimLong: {
- CpuRegister left_reg = left.AsRegister<CpuRegister>();
- if (right.IsConstant()) {
- int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
- codegen_->Compare64BitValue(left_reg, value);
- } else if (right.IsDoubleStackSlot()) {
- __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
- } else {
- __ cmpq(left_reg, right.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateLongCompare(left, right);
break;
}
case Primitive::kPrimFloat: {
@@ -3714,7 +3651,7 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
} else {
DCHECK(value.IsConstant()) << value;
if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ __ jmp(slow_path->GetEntryLabel());
}
}
break;
@@ -3729,7 +3666,7 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
} else {
DCHECK(value.IsConstant()) << value;
if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ __ jmp(slow_path->GetEntryLabel());
}
}
break;
@@ -4538,56 +4475,31 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
switch (type) {
case Primitive::kPrimBoolean: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movzxb(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
- } else {
- __ movzxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset));
- }
+ __ movzxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
}
case Primitive::kPrimByte: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movsxb(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
- } else {
- __ movsxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset));
- }
+ __ movsxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
}
case Primitive::kPrimShort: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movsxw(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
- } else {
- __ movsxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset));
- }
+ __ movsxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
break;
}
case Primitive::kPrimChar: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movzxw(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
- } else {
- __ movzxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset));
- }
+ __ movzxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
break;
}
case Primitive::kPrimInt: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movl(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
- } else {
- __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
- }
+ __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
break;
}
@@ -4604,21 +4516,16 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
} else {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
+ __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
if (index.IsConstant()) {
uint32_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ movl(out, Address(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- // If read barriers are enabled, emit read barriers other than
- // Baker's using a slow path (and also unpoison the loaded
- // reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
} else {
- __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- // If read barriers are enabled, emit read barriers other than
- // Baker's using a slow path (and also unpoison the loaded
- // reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(
instruction, out_loc, out_loc, obj_loc, data_offset, index);
}
@@ -4628,34 +4535,19 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimLong: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movq(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
- } else {
- __ movq(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset));
- }
+ __ movq(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset));
break;
}
case Primitive::kPrimFloat: {
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
- if (index.IsConstant()) {
- __ movss(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
- } else {
- __ movss(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
- }
+ __ movss(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
break;
}
case Primitive::kPrimDouble: {
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
- if (index.IsConstant()) {
- __ movsd(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
- } else {
- __ movsd(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset));
- }
+ __ movsd(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset));
break;
}
@@ -4718,9 +4610,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_1, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_1, offset);
if (value.IsRegister()) {
__ movb(address, value.AsRegister<CpuRegister>());
} else {
@@ -4733,9 +4623,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_2, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_2, offset);
if (value.IsRegister()) {
__ movw(address, value.AsRegister<CpuRegister>());
} else {
@@ -4748,9 +4636,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimNot: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset);
if (!value.IsRegister()) {
// Just setting null.
@@ -4846,9 +4732,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimInt: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset);
if (value.IsRegister()) {
__ movl(address, value.AsRegister<CpuRegister>());
} else {
@@ -4862,18 +4746,14 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimLong: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset);
if (value.IsRegister()) {
__ movq(address, value.AsRegister<CpuRegister>());
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
- Address address_high = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) +
- offset + sizeof(int32_t))
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset + sizeof(int32_t));
+ Address address_high =
+ CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset + sizeof(int32_t));
codegen_->MoveInt64ToAddress(address, address_high, v, instruction);
}
break;
@@ -4881,15 +4761,12 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimFloat: {
uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset);
if (value.IsFpuRegister()) {
__ movss(address, value.AsFpuRegister<XmmRegister>());
} else {
DCHECK(value.IsConstant());
- int32_t v =
- bit_cast<int32_t, float>(value.GetConstant()->AsFloatConstant()->GetValue());
+ int32_t v = bit_cast<int32_t, float>(value.GetConstant()->AsFloatConstant()->GetValue());
__ movl(address, Immediate(v));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -4898,19 +4775,15 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimDouble: {
uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset);
if (value.IsFpuRegister()) {
__ movsd(address, value.AsFpuRegister<XmmRegister>());
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
int64_t v =
bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue());
- Address address_high = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) +
- offset + sizeof(int32_t))
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset + sizeof(int32_t));
+ Address address_high =
+ CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset + sizeof(int32_t));
codegen_->MoveInt64ToAddress(address, address_high, v, instruction);
}
break;
@@ -5001,13 +4874,7 @@ void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction)
}
codegen_->MaybeRecordImplicitNullCheck(array_length);
} else {
- CpuRegister length = length_loc.AsRegister<CpuRegister>();
- if (index_loc.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
- __ cmpl(length, Immediate(value));
- } else {
- __ cmpl(length, index_loc.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateIntCompare(length_loc, index_loc);
}
codegen_->AddSlowPath(slow_path);
__ j(kBelowEqual, slow_path->GetEntryLabel());
@@ -6361,9 +6228,7 @@ void CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* in
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ ref =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- Address src = index.IsConstant() ?
- Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) :
- Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset);
+ Address src = CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset);
GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, needs_null_check);
}
@@ -6668,6 +6533,39 @@ void CodeGeneratorX86_64::Compare64BitValue(CpuRegister dest, int64_t value) {
}
}
+void CodeGeneratorX86_64::GenerateIntCompare(Location lhs, Location rhs) {
+ CpuRegister lhs_reg = lhs.AsRegister<CpuRegister>();
+ if (rhs.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
+ Compare32BitValue(lhs_reg, value);
+ } else if (rhs.IsStackSlot()) {
+ __ cmpl(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ } else {
+ __ cmpl(lhs_reg, rhs.AsRegister<CpuRegister>());
+ }
+}
+
+void CodeGeneratorX86_64::GenerateLongCompare(Location lhs, Location rhs) {
+ CpuRegister lhs_reg = lhs.AsRegister<CpuRegister>();
+ if (rhs.IsConstant()) {
+ int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue();
+ Compare64BitValue(lhs_reg, value);
+ } else if (rhs.IsDoubleStackSlot()) {
+ __ cmpq(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ } else {
+ __ cmpq(lhs_reg, rhs.AsRegister<CpuRegister>());
+ }
+}
+
+Address CodeGeneratorX86_64::ArrayAddress(CpuRegister obj,
+ Location index,
+ ScaleFactor scale,
+ uint32_t data_offset) {
+ return index.IsConstant() ?
+ Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) :
+ Address(obj, index.AsRegister<CpuRegister>(), scale, data_offset);
+}
+
void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
DCHECK(dest.IsDoubleStackSlot());
if (IsInt<32>(value)) {
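
The hunks above repeatedly collapse the `index.IsConstant()` ternaries into the new static `ArrayAddress()` helper. A minimal standalone sketch of that pattern, using simplified stand-in types (`Loc`, `Addr` and the register numbers are illustrative, not ART's `Location`/`Address` classes):

#include <cassert>
#include <cstdint>

struct Loc {
  bool is_constant;
  int32_t constant;  // valid when is_constant
  int reg;           // valid otherwise
};

struct Addr {
  int base_reg;
  int index_reg;     // -1 when the index was folded into the displacement
  int scale_log2;
  int32_t disp;
};

// One helper replaces the per-case constant-vs-register ternaries.
Addr ArrayAddress(int obj_reg, const Loc& index, int scale_log2, uint32_t data_offset) {
  if (index.is_constant) {
    return {obj_reg, -1, 0,
            (index.constant << scale_log2) + static_cast<int32_t>(data_offset)};
  }
  return {obj_reg, index.reg, scale_log2, static_cast<int32_t>(data_offset)};
}

int main() {
  Loc constant_index{true, 3, 0};
  Addr a = ArrayAddress(/*obj_reg=*/5, constant_index, /*scale_log2=*/2, /*data_offset=*/12);
  assert(a.index_reg == -1 && a.disp == 3 * 4 + 12);  // TIMES_4 scaling of a constant index
  Loc register_index{false, 0, 7};
  Addr b = ArrayAddress(/*obj_reg=*/5, register_index, /*scale_log2=*/2, /*data_offset=*/12);
  assert(b.index_reg == 7 && b.disp == 12);
  return 0;
}
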
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 693d0b8d26..7108676b8e 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -510,6 +510,18 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void Compare32BitValue(CpuRegister dest, int32_t value);
void Compare64BitValue(CpuRegister dest, int64_t value);
+ // Compare int values. Supports only register locations for `lhs`.
+ void GenerateIntCompare(Location lhs, Location rhs);
+
+ // Compare long values. Supports only register locations for `lhs`.
+ void GenerateLongCompare(Location lhs, Location rhs);
+
+ // Construct address for array access.
+ static Address ArrayAddress(CpuRegister obj,
+ Location index,
+ ScaleFactor scale,
+ uint32_t data_offset);
+
Address LiteralCaseTable(HPackedSwitch* switch_instr);
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 129c2a94b5..c501ccf80f 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -714,10 +714,12 @@ void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop,
case kCondGE: op = kGE; break;
default: LOG(FATAL) << "CONDITION UNREACHABLE";
}
+  // Associate the trip count with the control instruction rather than the condition (even
+  // though the condition is its use), since the former provides a convenient use-free placeholder.
+ HInstruction* control = loop->GetHeader()->GetLastInstruction();
InductionInfo* taken_test = CreateInvariantOp(op, lower_expr, upper_expr);
- AssignInfo(loop,
- loop->GetHeader()->GetLastInstruction(),
- CreateTripCount(tcKind, trip_count, taken_test, type));
+ DCHECK(control->IsIf());
+ AssignInfo(loop, control, CreateTripCount(tcKind, trip_count, taken_test, type));
}
bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 580d24b74b..292bc4e06e 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -157,6 +157,13 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
iva_->LookupInfo(loop_body_[d]->GetLoopInformation(), instruction));
}
+ // Returns induction information of the trip-count of loop at depth d.
+ std::string GetTripCount(int d) {
+ HInstruction* control = loop_header_[d]->GetLastInstruction();
+ DCHECK(control->IsIf());
+ return GetInductionInfo(control, d);
+ }
+
// Returns true if instructions have identical induction.
bool HaveSameInduction(HInstruction* instruction1, HInstruction* instruction2) {
return HInductionVarAnalysis::InductionEqual(
@@ -239,8 +246,7 @@ TEST_F(InductionVarAnalysisTest, FindBasicInduction) {
EXPECT_FALSE(HaveSameInduction(store->InputAt(1), increment_[0]));
// Trip-count.
- EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindDerivedInduction) {
@@ -579,8 +585,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
}
EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(increment_[d], d).c_str());
// Trip-count.
- EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
- GetInductionInfo(loop_header_[d]->GetLastInstruction(), d).c_str());
+ EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(d).c_str());
}
}
@@ -607,8 +612,7 @@ TEST_F(InductionVarAnalysisTest, ByteInductionIntLoopControl) {
EXPECT_FALSE(HaveSameInduction(store1->InputAt(1), store2->InputAt(1)));
// Trip-count.
- EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, ByteLoopControl1) {
@@ -626,8 +630,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl1) {
EXPECT_STREQ("((1) * i + ((-128) + (1))):PrimByte", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count.
- EXPECT_STREQ("(((127) - (-128)) (TC-loop) ((-128) < (127)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("(((127) - (-128)) (TC-loop) ((-128) < (127)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, ByteLoopControl2) {
@@ -645,7 +648,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl2) {
EXPECT_STREQ("((1) * i + ((-128) + (1))):PrimByte", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count undefined.
- EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, ShortLoopControl1) {
@@ -664,8 +667,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl1) {
EXPECT_STREQ("((1) * i + ((-32768) + (1))):PrimShort",
GetInductionInfo(increment_[0], 0).c_str());
// Trip-count.
- EXPECT_STREQ("(((32767) - (-32768)) (TC-loop) ((-32768) < (32767)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("(((32767) - (-32768)) (TC-loop) ((-32768) < (32767)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, ShortLoopControl2) {
@@ -684,7 +686,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl2) {
EXPECT_STREQ("((1) * i + ((-32768) + (1))):PrimShort",
GetInductionInfo(increment_[0], 0).c_str());
// Trip-count undefined.
- EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, CharLoopControl1) {
@@ -701,8 +703,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl1) {
EXPECT_STREQ("((1) * i + (1)):PrimChar", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count.
- EXPECT_STREQ("((65535) (TC-loop) ((0) < (65535)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("((65535) (TC-loop) ((0) < (65535)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, CharLoopControl2) {
@@ -719,7 +720,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl2) {
EXPECT_STREQ("((1) * i + (1)):PrimChar", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count undefined.
- EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("", GetTripCount(0).c_str());
}
} // namespace art
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 18e6f5ca9f..cd8b7c7960 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -106,6 +106,12 @@ static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
return instruction;
}
+/** Helper method to obtain a loop's control instruction. */
+static HInstruction* GetLoopControl(HLoopInformation* loop) {
+ DCHECK(loop != nullptr);
+ return loop->GetHeader()->GetLastInstruction();
+}
+
//
// Public class methods.
//
@@ -179,7 +185,7 @@ void InductionVarRange::GenerateRange(HInstruction* context,
/*out*/HInstruction** lower,
/*out*/HInstruction** upper) {
bool is_last_value = false;
- int64_t s = 0;
+ int64_t stride_value = 0;
bool b1, b2; // unused
if (!GenerateCode(context,
instruction,
@@ -189,7 +195,7 @@ void InductionVarRange::GenerateRange(HInstruction* context,
lower,
upper,
nullptr,
- &s,
+ &stride_value,
&b1,
&b2)) {
LOG(FATAL) << "Failed precondition: CanGenerateRange()";
@@ -232,7 +238,9 @@ bool InductionVarRange::CanGenerateLastValue(HInstruction* instruction) {
nullptr,
nullptr,
nullptr, // nothing generated yet
- &stride_value, &needs_finite_test, &needs_taken_test)
+ &stride_value,
+ &needs_finite_test,
+ &needs_taken_test)
&& !needs_finite_test && !needs_taken_test;
}
@@ -265,7 +273,10 @@ void InductionVarRange::Replace(HInstruction* instruction,
for (HLoopInformation* lp = instruction->GetBlock()->GetLoopInformation(); // closest enveloping loop
lp != nullptr;
lp = lp->GetPreHeader()->GetLoopInformation()) {
+ // Update instruction's information.
ReplaceInduction(induction_analysis_->LookupInfo(lp, instruction), fetch, replacement);
+ // Update loop's trip-count information.
+ ReplaceInduction(induction_analysis_->LookupInfo(lp, GetLoopControl(lp)), fetch, replacement);
}
}
@@ -308,13 +319,13 @@ bool InductionVarRange::HasInductionInfo(
/*out*/ HLoopInformation** loop,
/*out*/ HInductionVarAnalysis::InductionInfo** info,
/*out*/ HInductionVarAnalysis::InductionInfo** trip) const {
- HLoopInformation* l = context->GetBlock()->GetLoopInformation(); // closest enveloping loop
- if (l != nullptr) {
- HInductionVarAnalysis::InductionInfo* i = induction_analysis_->LookupInfo(l, instruction);
+ HLoopInformation* lp = context->GetBlock()->GetLoopInformation(); // closest enveloping loop
+ if (lp != nullptr) {
+ HInductionVarAnalysis::InductionInfo* i = induction_analysis_->LookupInfo(lp, instruction);
if (i != nullptr) {
- *loop = l;
+ *loop = lp;
*info = i;
- *trip = induction_analysis_->LookupInfo(l, l->GetHeader()->GetLastInstruction());
+ *trip = induction_analysis_->LookupInfo(lp, GetLoopControl(lp));
return true;
}
}
@@ -878,7 +889,8 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
} else if (stride_value == -1) {
oper = new (graph->GetArena()) HSub(type, opb, opa);
} else {
- HInstruction* mul = new (graph->GetArena()) HMul(type, graph->GetIntConstant(stride_value), opa);
+ HInstruction* mul = new (graph->GetArena()) HMul(
+ type, graph->GetIntConstant(stride_value), opa);
oper = new (graph->GetArena()) HAdd(type, Insert(block, mul), opb);
}
*result = Insert(block, oper);
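
The last hunk of `GenerateCode()` above builds `stride * i + offset` out of `HMul`/`HAdd`/`HSub` nodes, special-casing strides of +1 and -1. A sketch of the same arithmetic with plain integers (the helper name is hypothetical, not part of ART):

#include <cassert>
#include <cstdint>

// stride == +1 becomes a single add, stride == -1 a single subtract; only the
// general case needs the multiply that the hunk wraps in an extra HMul node.
int32_t LinearCombination(int32_t stride, int32_t opa, int32_t opb) {
  if (stride == 1) {
    return opa + opb;         // HAdd(opa, opb)
  } else if (stride == -1) {
    return opb - opa;         // HSub(opb, opa)
  }
  return stride * opa + opb;  // HAdd(HMul(stride, opa), opb)
}

int main() {
  assert(LinearCombination(1, 100, 0) == 100);
  assert(LinearCombination(-1, 100, 127) == 27);
  assert(LinearCombination(4, 25, 12) == 112);
  return 0;
}
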
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 8f7778fe68..04e063c92e 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -259,7 +259,8 @@ bool TryExtractArrayAccessAddress(HInstruction* access,
HIntConstant* offset = graph->GetIntConstant(data_offset);
HIntermediateAddress* address =
new (arena) HIntermediateAddress(array, offset, kNoDexPc);
- address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
+ // TODO: Is it ok to not have this on the intermediate address?
+ // address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
access->GetBlock()->InsertInstructionBefore(address, access);
access->ReplaceInput(address, 0);
// Both instructions must depend on GC to prevent any instruction that can
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index be8eb51e42..1d153e2e18 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1857,11 +1857,11 @@ static void GenHighestOneBit(LocationSummary* locations,
if (type == Primitive::kPrimLong) {
__ Dclz(TMP, in);
__ LoadConst64(AT, INT64_C(0x8000000000000000));
- __ Dsrlv(out, AT, TMP);
+ __ Dsrlv(AT, AT, TMP);
} else {
__ Clz(TMP, in);
__ LoadConst32(AT, 0x80000000);
- __ Srlv(out, AT, TMP);
+ __ Srlv(AT, AT, TMP);
}
// For either value of "type", when "in" is zero, "out" should also
// be zero. Without this extra "and" operation, when "in" is zero,
@@ -1869,7 +1869,7 @@ static void GenHighestOneBit(LocationSummary* locations,
// the MIPS logical shift operations "dsrlv", and "srlv" don't use
// the shift amount (TMP) directly; they use either (TMP % 64) or
// (TMP % 32), respectively.
- __ And(out, out, in);
+ __ And(out, AT, in);
}
// int java.lang.Integer.highestOneBit(int)
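
The MIPS64 fix above keeps the shifted mask in AT and ANDs it with the original input, so the routine still returns 0 for a zero input (the variable shift only sees the amount modulo 64). The same computation in portable C++, assuming a GCC/Clang toolchain for `__builtin_clzll`:

#include <cassert>
#include <cstdint>

uint64_t HighestOneBit(uint64_t x) {
  // clz == 64 for x == 0; masking with 63 mirrors dsrlv using only the low
  // bits of the shift amount, and the final AND with the original input
  // forces the zero-input result back to 0.
  int clz = (x == 0) ? 64 : __builtin_clzll(x);
  uint64_t mask = UINT64_C(0x8000000000000000) >> (clz & 63);
  return mask & x;
}

int main() {
  assert(HighestOneBit(0) == 0);
  assert(HighestOneBit(1) == 1);
  assert(HighestOneBit(UINT64_C(0x123456789ABCDEF0)) == UINT64_C(0x1000000000000000));
  return 0;
}
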
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index caecc578c6..6d207765e3 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4374,7 +4374,7 @@ class HDiv FINAL : public HBinaryOperation {
HInstruction* left,
HInstruction* right,
uint32_t dex_pc)
- : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {}
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
template <typename T>
T ComputeIntegral(T x, T y) const {
@@ -4409,11 +4409,6 @@ class HDiv FINAL : public HBinaryOperation {
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- static SideEffects SideEffectsForArchRuntimeCalls() {
- // The generated code can use a runtime call.
- return SideEffects::CanTriggerGC();
- }
-
DECLARE_INSTRUCTION(Div);
private:
@@ -4426,7 +4421,7 @@ class HRem FINAL : public HBinaryOperation {
HInstruction* left,
HInstruction* right,
uint32_t dex_pc)
- : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {}
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
template <typename T>
T ComputeIntegral(T x, T y) const {
@@ -4461,10 +4456,6 @@ class HRem FINAL : public HBinaryOperation {
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- static SideEffects SideEffectsForArchRuntimeCalls() {
- return SideEffects::CanTriggerGC();
- }
-
DECLARE_INSTRUCTION(Rem);
private:
@@ -4917,9 +4908,7 @@ class HTypeConversion FINAL : public HExpression<1> {
public:
// Instantiate a type conversion of `input` to `result_type`.
HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc)
- : HExpression(result_type,
- SideEffectsForArchRuntimeCalls(input->GetType(), result_type),
- dex_pc) {
+ : HExpression(result_type, SideEffects::None(), dex_pc) {
SetRawInputAt(0, input);
// Invariant: We should never generate a conversion to a Boolean value.
DCHECK_NE(Primitive::kPrimBoolean, result_type);
@@ -4938,18 +4927,6 @@ class HTypeConversion FINAL : public HExpression<1> {
// containing the result. If the input cannot be converted, return nullptr.
HConstant* TryStaticEvaluation() const;
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type input_type,
- Primitive::Type result_type) {
- // Some architectures may not require the 'GC' side effects, but at this point
- // in the compilation process we do not know what architecture we will
- // generate code for, so we must be conservative.
- if ((Primitive::IsFloatingPointType(input_type) && Primitive::IsIntegralType(result_type))
- || (input_type == Primitive::kPrimLong && Primitive::IsFloatingPointType(result_type))) {
- return SideEffects::CanTriggerGC();
- }
- return SideEffects::None();
- }
-
DECLARE_INSTRUCTION(TypeConversion);
private:
@@ -5031,9 +5008,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HExpression(field_type,
- SideEffectsForArchRuntimeCalls(field_type, is_volatile),
- dex_pc),
+ : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset,
field_type,
is_volatile,
@@ -5064,16 +5039,6 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
- SideEffects side_effects = SideEffects::FieldReadOfType(field_type, is_volatile);
-
- // MIPS delegates volatile kPrimLong and kPrimDouble loads to a runtime helper.
- if (Primitive::Is64BitType(field_type)) {
- side_effects.Add(SideEffects::CanTriggerGC());
- }
- return side_effects;
- }
-
DECLARE_INSTRUCTION(InstanceFieldGet);
private:
@@ -5094,8 +5059,7 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffectsForArchRuntimeCalls(field_type, is_volatile),
- dex_pc),
+ : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset,
field_type,
is_volatile,
@@ -5120,16 +5084,6 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
- SideEffects side_effects = SideEffects::FieldWriteOfType(field_type, is_volatile);
-
- // MIPS delegates volatile kPrimLong and kPrimDouble stores to a runtime helper.
- if (Primitive::Is64BitType(field_type)) {
- side_effects.Add(SideEffects::CanTriggerGC());
- }
- return side_effects;
- }
-
DECLARE_INSTRUCTION(InstanceFieldSet);
private:
@@ -5934,9 +5888,7 @@ class HStaticFieldGet FINAL : public HExpression<1> {
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HExpression(field_type,
- SideEffectsForArchRuntimeCalls(field_type, is_volatile),
- dex_pc),
+ : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset,
field_type,
is_volatile,
@@ -5964,16 +5916,6 @@ class HStaticFieldGet FINAL : public HExpression<1> {
Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
- SideEffects side_effects = SideEffects::FieldReadOfType(field_type, is_volatile);
-
- // MIPS delegates volatile kPrimLong and kPrimDouble loads to a runtime helper.
- if (Primitive::Is64BitType(field_type)) {
- side_effects.Add(SideEffects::CanTriggerGC());
- }
- return side_effects;
- }
-
DECLARE_INSTRUCTION(StaticFieldGet);
private:
@@ -5994,8 +5936,7 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffectsForArchRuntimeCalls(field_type, is_volatile),
- dex_pc),
+ : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset,
field_type,
is_volatile,
@@ -6017,16 +5958,6 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
- SideEffects side_effects = SideEffects::FieldWriteOfType(field_type, is_volatile);
-
- // MIPS delegates volatile kPrimLong and kPrimDouble stores to a runtime helper.
- if (Primitive::Is64BitType(field_type)) {
- side_effects.Add(SideEffects::CanTriggerGC());
- }
- return side_effects;
- }
-
DECLARE_INSTRUCTION(StaticFieldSet);
private:
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 8bd8667f84..814202e97b 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -17,6 +17,11 @@
#ifndef ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
#define ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
+// This `#include` should never be needed for compilation, as this file (`nodes_shared.h`) is
+// included in `nodes.h`. However, it helps editing tools (e.g. YouCompleteMe) by giving them
+// better context (defining `HInstruction` and co).
+#include "nodes.h"
+
namespace art {
class HMultiplyAccumulate FINAL : public HExpression<3> {
@@ -117,10 +122,15 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
// This instruction computes an intermediate address pointing in the 'middle' of an object. The
// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
// never used across anything that can trigger GC.
+// The result of this instruction is not a pointer in the sense of `Primitive::kPrimNot`, so we
+// represent it with the type `Primitive::kPrimInt`.
class HIntermediateAddress FINAL : public HExpression<2> {
public:
HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
- : HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) {
+ : HExpression(Primitive::kPrimInt, SideEffects::DependsOnGC(), dex_pc) {
+ DCHECK_EQ(Primitive::ComponentSize(Primitive::kPrimInt),
+ Primitive::ComponentSize(Primitive::kPrimNot))
+ << "kPrimInt and kPrimNot have different sizes.";
SetRawInputAt(0, base_address);
SetRawInputAt(1, offset);
}
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index b8e1379ef9..e64c005410 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -157,20 +157,11 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
}
void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
- if (load_class->NeedsAccessCheck()) {
- // We need to call the runtime anyway, so we simply get the class as that call's return value.
- return;
- }
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
- // Loading from the ArtMethod* is the most efficient retrieval.
- // TODO: This may not actually be true for all architectures and
- // locations of target classes. The additional register pressure
- // for using the ArtMethod* should be considered.
- return;
- }
-
- DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
+ load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
+ << load_class->GetLoadKind();
DCHECK(!load_class->IsInDexCache()) << "HLoadClass should not be optimized before sharpening.";
+ DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
const DexFile& dex_file = load_class->GetDexFile();
uint32_t type_index = load_class->GetTypeIndex();
@@ -242,13 +233,28 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
}
}
}
- if (is_in_dex_cache) {
- load_class->MarkInDexCache();
- }
+
if (is_in_boot_image) {
load_class->MarkInBootImage();
}
+ if (load_class->NeedsAccessCheck()) {
+ // We need to call the runtime anyway, so we simply get the class as that call's return value.
+ return;
+ }
+
+ if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
+    // Loading from the ArtMethod* is the most efficient retrieval in terms of code size.
+ // TODO: This may not actually be true for all architectures and
+ // locations of target classes. The additional register pressure
+ // for using the ArtMethod* should be considered.
+ return;
+ }
+
+ if (is_in_dex_cache) {
+ load_class->MarkInDexCache();
+ }
+
HLoadClass::LoadKind load_kind = codegen_->GetSupportedLoadClassKind(desired_load_kind);
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 099620ccb8..e1255f7f23 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -496,46 +496,61 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
public:
template <typename ImplicitNullChecker = NoImplicitNullChecker>
- void StoreConst32ToOffset(int32_t value,
- Register base,
- int32_t offset,
- Register temp,
- ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ void StoreConstToOffset(StoreOperandType type,
+ int64_t value,
+ Register base,
+ int32_t offset,
+ Register temp,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ // We permit `base` and `temp` to coincide (however, we check that neither is AT),
+ // in which case the `base` register may be overwritten in the process.
CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false);
- if (value == 0) {
- temp = ZERO;
- } else {
- LoadConst32(temp, value);
- }
- Sw(temp, base, offset);
- null_checker();
- }
-
- template <typename ImplicitNullChecker = NoImplicitNullChecker>
- void StoreConst64ToOffset(int64_t value,
- Register base,
- int32_t offset,
- Register temp,
- ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
uint32_t low = Low32Bits(value);
uint32_t high = High32Bits(value);
+ Register reg;
+ // If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp`
+ // to load and hold the value but we can use AT instead as AT hasn't been used yet.
+ // Otherwise, `temp` can be used for the value. And if `temp` is the same as the
+ // original `base` (that is, `base` prior to the adjustment), the original `base`
+ // register will be overwritten.
+ if (base == temp) {
+ temp = AT;
+ }
if (low == 0) {
- Sw(ZERO, base, offset);
+ reg = ZERO;
} else {
- LoadConst32(temp, low);
- Sw(temp, base, offset);
+ reg = temp;
+ LoadConst32(reg, low);
}
- null_checker();
- if (high == 0) {
- Sw(ZERO, base, offset + kMipsWordSize);
- } else {
- if (high != low) {
- LoadConst32(temp, high);
- }
- Sw(temp, base, offset + kMipsWordSize);
+ switch (type) {
+ case kStoreByte:
+ Sb(reg, base, offset);
+ break;
+ case kStoreHalfword:
+ Sh(reg, base, offset);
+ break;
+ case kStoreWord:
+ Sw(reg, base, offset);
+ break;
+ case kStoreDoubleword:
+ Sw(reg, base, offset);
+ null_checker();
+ if (high == 0) {
+ reg = ZERO;
+ } else {
+ reg = temp;
+ if (high != low) {
+ LoadConst32(reg, high);
+ }
+ }
+ Sw(reg, base, offset + kMipsWordSize);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+ if (type != kStoreDoubleword) {
+ null_checker();
}
}
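
The merged `StoreConstToOffset()` above picks the register that materializes the constant with one rule: zero goes through $zero, and when the caller passes the same register for `base` and `temp`, AT is used instead so the (possibly adjusted) base survives. A sketch of just that rule, with illustrative enum names rather than the assembler's types:

#include <cassert>
#include <cstdint>

enum Reg { ZERO, AT, A1, T8 };

Reg ValueRegister(uint32_t value, Reg base, Reg temp) {
  if (base == temp) {
    temp = AT;  // `temp` aliases the base, so the constant must live in AT
  }
  return (value == 0) ? ZERO : temp;
}

int main() {
  assert(ValueRegister(0, A1, T8) == ZERO);          // "sw $zero, 0($a1)"
  assert(ValueRegister(0x12345678, A1, T8) == T8);   // "sw $t8, 0($a1)"
  assert(ValueRegister(0x12345678, T8, T8) == AT);   // "sw $at, 0($t8)" in the new test
  return 0;
}
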
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index a92455fe60..a9abf2f86e 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -1977,6 +1977,85 @@ TEST_F(AssemblerMIPSTest, StoreDToOffset) {
DriverStr(expected, "StoreDToOffset");
}
+TEST_F(AssemblerMIPSTest, StoreConstToOffset) {
+ __ StoreConstToOffset(mips::kStoreByte, 0xFF, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreHalfword, 0xFFFF, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0x123456789ABCDEF0, mips::A1, +0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreByte, 0, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreHalfword, 0, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0, mips::A1, +0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0x1234567812345678, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0x1234567800000000, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0x0000000012345678, mips::A1, +0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreWord, 0, mips::T8, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::T8, +0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreWord, 0, mips::A1, -0xFFF0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::A1, +0xFFF0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreWord, 0, mips::T8, -0xFFF0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::T8, +0xFFF0, mips::T8);
+
+ const char* expected =
+ "ori $t8, $zero, 0xFF\n"
+ "sb $t8, 0($a1)\n"
+ "ori $t8, $zero, 0xFFFF\n"
+ "sh $t8, 0($a1)\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0($a1)\n"
+ "lui $t8, 0x9ABC\n"
+ "ori $t8, $t8, 0xDEF0\n"
+ "sw $t8, 0($a1)\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 4($a1)\n"
+
+ "sb $zero, 0($a1)\n"
+ "sh $zero, 0($a1)\n"
+ "sw $zero, 0($a1)\n"
+ "sw $zero, 0($a1)\n"
+ "sw $zero, 4($a1)\n"
+
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0($a1)\n"
+ "sw $t8, 4($a1)\n"
+ "sw $zero, 0($a1)\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 4($a1)\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0($a1)\n"
+ "sw $zero, 4($a1)\n"
+
+ "sw $zero, 0($t8)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
+ "sw $at, 0($t8)\n"
+
+ "addiu $at, $a1, -0x7FF8\n"
+ "sw $zero, -0x7FF8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0x7FF8($at)\n"
+
+ "addiu $at, $t8, -0x7FF8\n"
+ "sw $zero, -0x7FF8($at)\n"
+ "addiu $at, $t8, 0x7FF8\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0x7FF8($at)\n";
+ DriverStr(expected, "StoreConstToOffset");
+}
+
TEST_F(AssemblerMIPSTest, B) {
mips::MipsLabel label1, label2;
__ B(&label1);