Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc                   484
-rw-r--r--  compiler/optimizing/builder.h                     42
-rw-r--r--  compiler/optimizing/code_generator.cc             18
-rw-r--r--  compiler/optimizing/code_generator.h               7
-rw-r--r--  compiler/optimizing/code_generator_arm.cc        413
-rw-r--r--  compiler/optimizing/code_generator_arm.h           6
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc        7
-rw-r--r--  compiler/optimizing/code_generator_arm64.h         5
-rw-r--r--  compiler/optimizing/code_generator_x86.cc        499
-rw-r--r--  compiler/optimizing/code_generator_x86.h           6
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc     480
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h        6
-rw-r--r--  compiler/optimizing/codegen_test.cc               25
-rw-r--r--  compiler/optimizing/nodes.h                      174
-rw-r--r--  compiler/optimizing/register_allocator_test.cc     2
-rw-r--r--  compiler/optimizing/ssa_builder.cc                 5
-rw-r--r--  compiler/optimizing/ssa_builder.h                  1
17 files changed, 1771 insertions, 409 deletions
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8418ab0a7e..b51b6e7d25 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -41,25 +41,29 @@ namespace art {
*/
class Temporaries : public ValueObject {
public:
- Temporaries(HGraph* graph, size_t count) : graph_(graph), count_(count), index_(0) {
- graph_->UpdateNumberOfTemporaries(count_);
- }
+ explicit Temporaries(HGraph* graph) : graph_(graph), index_(0) {}
void Add(HInstruction* instruction) {
- // We currently only support vreg size temps.
- DCHECK(instruction->GetType() != Primitive::kPrimLong
- && instruction->GetType() != Primitive::kPrimDouble);
- HInstruction* temp = new (graph_->GetArena()) HTemporary(index_++);
+ HInstruction* temp = new (graph_->GetArena()) HTemporary(index_);
instruction->GetBlock()->AddInstruction(temp);
+
DCHECK(temp->GetPrevious() == instruction);
+
+ size_t offset;
+ if (instruction->GetType() == Primitive::kPrimLong
+ || instruction->GetType() == Primitive::kPrimDouble) {
+ offset = 2;
+ } else {
+ offset = 1;
+ }
+ index_ += offset;
+
+ graph_->UpdateTemporariesVRegSlots(index_);
}
private:
HGraph* const graph_;
- // The total number of temporaries that will be used.
- const size_t count_;
-
// Current index in the temporary stack, updated by `Add`.
size_t index_;
};
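Note on the reworked Temporaries helper: Add() now bumps index_ by two vreg slots for long/double values and reports the running total to the graph, instead of requiring callers to predeclare a count. The nodes.h side of this change is not part of this excerpt; a minimal sketch, assuming UpdateTemporariesVRegSlots simply keeps the largest slot count ever reported:
// Hypothetical sketch of the HGraph side (nodes.h is not shown in this diff).
void HGraph::UpdateTemporariesVRegSlots(size_t slots) {
  temporaries_vreg_slots_ = std::max(temporaries_vreg_slots_, slots);
}
size_t HGraph::GetTemporariesVRegSlots() const { return temporaries_vreg_slots_; }
With that accounting, callers no longer size the helper up front; for example (hypothetical instruction names), temps.Add(a_long_value) advances index_ from 0 to 2, and a following temps.Add(a_reference) takes slot 2.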
@@ -115,37 +119,37 @@ void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
}
template<typename T>
-void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_offset) {
+void HGraphBuilder::If_22t(const Instruction& instruction, uint32_t dex_pc) {
int32_t target_offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(target_offset, dex_offset);
+ PotentiallyAddSuspendCheck(target_offset, dex_pc);
HInstruction* first = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
HInstruction* second = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
T* comparison = new (arena_) T(first, second);
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
+ HBasicBlock* target = FindBlockStartingAt(dex_pc + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
- target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
+ target = FindBlockStartingAt(dex_pc + instruction.SizeInCodeUnits());
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
current_block_ = nullptr;
}
template<typename T>
-void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_offset) {
+void HGraphBuilder::If_21t(const Instruction& instruction, uint32_t dex_pc) {
int32_t target_offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(target_offset, dex_offset);
+ PotentiallyAddSuspendCheck(target_offset, dex_pc);
HInstruction* value = LoadLocal(instruction.VRegA(), Primitive::kPrimInt);
T* comparison = new (arena_) T(value, GetIntConstant(0));
current_block_->AddInstruction(comparison);
HInstruction* ifinst = new (arena_) HIf(comparison);
current_block_->AddInstruction(ifinst);
- HBasicBlock* target = FindBlockStartingAt(dex_offset + target_offset);
+ HBasicBlock* target = FindBlockStartingAt(dex_pc + target_offset);
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
- target = FindBlockStartingAt(dex_offset + instruction.SizeInCodeUnits());
+ target = FindBlockStartingAt(dex_pc + instruction.SizeInCodeUnits());
DCHECK(target != nullptr);
current_block_->AddSuccessor(target);
current_block_ = nullptr;
@@ -192,13 +196,13 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
InitializeParameters(code_item.ins_size_);
- size_t dex_offset = 0;
+ size_t dex_pc = 0;
while (code_ptr < code_end) {
- // Update the current block if dex_offset starts a new block.
- MaybeUpdateCurrentBlock(dex_offset);
+ // Update the current block if dex_pc starts a new block.
+ MaybeUpdateCurrentBlock(dex_pc);
const Instruction& instruction = *Instruction::At(code_ptr);
- if (!AnalyzeDexInstruction(instruction, dex_offset)) return nullptr;
- dex_offset += instruction.SizeInCodeUnits();
+ if (!AnalyzeDexInstruction(instruction, dex_pc)) return nullptr;
+ dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
}
@@ -239,25 +243,25 @@ void HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr, const uint16_
// Iterate over all instructions and find branching instructions. Create blocks for
// the locations these instructions branch to.
- size_t dex_offset = 0;
+ size_t dex_pc = 0;
while (code_ptr < code_end) {
const Instruction& instruction = *Instruction::At(code_ptr);
if (instruction.IsBranch()) {
- int32_t target = instruction.GetTargetOffset() + dex_offset;
+ int32_t target = instruction.GetTargetOffset() + dex_pc;
// Create a block for the target instruction.
if (FindBlockStartingAt(target) == nullptr) {
block = new (arena_) HBasicBlock(graph_, target);
branch_targets_.Put(target, block);
}
- dex_offset += instruction.SizeInCodeUnits();
+ dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
- if ((code_ptr < code_end) && (FindBlockStartingAt(dex_offset) == nullptr)) {
- block = new (arena_) HBasicBlock(graph_, dex_offset);
- branch_targets_.Put(dex_offset, block);
+ if ((code_ptr < code_end) && (FindBlockStartingAt(dex_pc) == nullptr)) {
+ block = new (arena_) HBasicBlock(graph_, dex_pc);
+ branch_targets_.Put(dex_pc, block);
}
} else {
code_ptr += instruction.SizeInCodeUnits();
- dex_offset += instruction.SizeInCodeUnits();
+ dex_pc += instruction.SizeInCodeUnits();
}
}
}
@@ -291,6 +295,16 @@ void HGraphBuilder::Binop_23x(const Instruction& instruction, Primitive::Type ty
}
template<typename T>
+void HGraphBuilder::Binop_23x(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegB(), type);
+ HInstruction* second = LoadLocal(instruction.VRegC(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+template<typename T>
void HGraphBuilder::Binop_12x(const Instruction& instruction, Primitive::Type type) {
HInstruction* first = LoadLocal(instruction.VRegA(), type);
HInstruction* second = LoadLocal(instruction.VRegB(), type);
@@ -299,6 +313,16 @@ void HGraphBuilder::Binop_12x(const Instruction& instruction, Primitive::Type ty
}
template<typename T>
+void HGraphBuilder::Binop_12x(const Instruction& instruction,
+ Primitive::Type type,
+ uint32_t dex_pc) {
+ HInstruction* first = LoadLocal(instruction.VRegA(), type);
+ HInstruction* second = LoadLocal(instruction.VRegB(), type);
+ current_block_->AddInstruction(new (arena_) T(type, first, second, dex_pc));
+ UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+}
+
+template<typename T>
void HGraphBuilder::Binop_22s(const Instruction& instruction, bool reverse) {
HInstruction* first = LoadLocal(instruction.VRegB(), Primitive::kPrimInt);
HInstruction* second = GetIntConstant(instruction.VRegC_22s());
@@ -332,7 +356,7 @@ void HGraphBuilder::BuildReturn(const Instruction& instruction, Primitive::Type
}
bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
uint32_t method_idx,
uint32_t number_of_vreg_arguments,
bool is_range,
@@ -374,39 +398,44 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
HInvoke* invoke = nullptr;
- if (invoke_type == kVirtual || invoke_type == kInterface) {
+ if (invoke_type == kVirtual || invoke_type == kInterface || invoke_type == kSuper) {
MethodReference target_method(dex_file_, method_idx);
uintptr_t direct_code;
uintptr_t direct_method;
int table_index;
InvokeType optimized_invoke_type = invoke_type;
- // TODO: Add devirtualization support.
- compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
+ compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_pc, true, true,
&optimized_invoke_type, &target_method, &table_index,
&direct_code, &direct_method);
if (table_index == -1) {
return false;
}
- if (invoke_type == kVirtual) {
+ if (optimized_invoke_type == kVirtual) {
invoke = new (arena_) HInvokeVirtual(
- arena_, number_of_arguments, return_type, dex_offset, table_index);
- } else {
- DCHECK_EQ(invoke_type, kInterface);
+ arena_, number_of_arguments, return_type, dex_pc, table_index);
+ } else if (optimized_invoke_type == kInterface) {
invoke = new (arena_) HInvokeInterface(
- arena_, number_of_arguments, return_type, dex_offset, method_idx, table_index);
+ arena_, number_of_arguments, return_type, dex_pc, method_idx, table_index);
+ } else if (optimized_invoke_type == kDirect) {
+ // For this compiler, sharpening only works if we compile PIC.
+ DCHECK(compiler_driver_->GetCompilerOptions().GetCompilePic());
+ // Treat invoke-direct like static calls for now.
+ invoke = new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index);
}
} else {
+ DCHECK(invoke_type == kDirect || invoke_type == kStatic);
// Treat invoke-direct like static calls for now.
invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ arena_, number_of_arguments, return_type, dex_pc, method_idx);
}
size_t start_index = 0;
- Temporaries temps(graph_, is_instance_call ? 1 : 0);
+ Temporaries temps(graph_);
if (is_instance_call) {
HInstruction* arg = LoadLocal(is_range ? register_index : args[0], Primitive::kPrimNot);
- HNullCheck* null_check = new (arena_) HNullCheck(arg, dex_offset);
+ HNullCheck* null_check = new (arena_) HNullCheck(arg, dex_pc);
current_block_->AddInstruction(null_check);
temps.Add(null_check);
invoke->SetArgumentAt(0, null_check);
@@ -420,7 +449,7 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
bool is_wide = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
if (!is_range && is_wide && args[i] + 1 != args[i + 1]) {
LOG(WARNING) << "Non sequential register pair in " << dex_compilation_unit_->GetSymbol()
- << " at " << dex_offset;
+ << " at " << dex_pc;
// We do not implement non sequential register pair.
return false;
}
@@ -438,7 +467,7 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
}
bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put) {
uint32_t source_or_dest_reg = instruction.VRegA_22c();
uint32_t obj_reg = instruction.VRegB_22c();
@@ -459,9 +488,9 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot);
- current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_offset));
+ current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_pc));
if (is_put) {
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
HInstruction* null_check = current_block_->GetLastInstruction();
// We need one temporary for the null check.
temps.Add(null_check);
@@ -485,7 +514,7 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put) {
uint32_t source_or_dest_reg = instruction.VRegA_21c();
uint16_t field_index = instruction.VRegB_21c();
@@ -515,18 +544,18 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
HLoadClass* constant = new (arena_) HLoadClass(
- storage_index, is_referrers_class, dex_offset);
+ storage_index, is_referrers_class, dex_pc);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
if (!is_initialized) {
- cls = new (arena_) HClinitCheck(constant, dex_offset);
+ cls = new (arena_) HClinitCheck(constant, dex_pc);
current_block_->AddInstruction(cls);
}
if (is_put) {
// We need to keep the class alive before loading the value.
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
temps.Add(cls);
HInstruction* value = LoadLocal(source_or_dest_reg, field_type);
DCHECK_EQ(value->GetType(), field_type);
@@ -539,29 +568,41 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
return true;
}
-void HGraphBuilder::BuildCheckedDiv(uint16_t out_reg,
- uint16_t first_reg,
- int32_t second_reg,
- uint32_t dex_offset,
+void HGraphBuilder::BuildCheckedDiv(uint16_t out_vreg,
+ uint16_t first_vreg,
+ int64_t second_vreg_or_constant,
+ uint32_t dex_pc,
Primitive::Type type,
- bool second_is_lit) {
- DCHECK(type == Primitive::kPrimInt);
-
- HInstruction* first = LoadLocal(first_reg, type);
- HInstruction* second = second_is_lit ? GetIntConstant(second_reg) : LoadLocal(second_reg, type);
- if (!second->IsIntConstant() || (second->AsIntConstant()->GetValue() == 0)) {
- second = new (arena_) HDivZeroCheck(second, dex_offset);
- Temporaries temps(graph_, 1);
+ bool second_is_constant) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ HInstruction* first = LoadLocal(first_vreg, type);
+ HInstruction* second = nullptr;
+ if (second_is_constant) {
+ if (type == Primitive::kPrimInt) {
+ second = GetIntConstant(second_vreg_or_constant);
+ } else {
+ second = GetLongConstant(second_vreg_or_constant);
+ }
+ } else {
+ second = LoadLocal(second_vreg_or_constant, type);
+ }
+
+ if (!second_is_constant
+ || (type == Primitive::kPrimInt && second->AsIntConstant()->GetValue() == 0)
+ || (type == Primitive::kPrimLong && second->AsLongConstant()->GetValue() == 0)) {
+ second = new (arena_) HDivZeroCheck(second, dex_pc);
+ Temporaries temps(graph_);
current_block_->AddInstruction(second);
temps.Add(current_block_->GetLastInstruction());
}
- current_block_->AddInstruction(new (arena_) HDiv(type, first, second));
- UpdateLocal(out_reg, current_block_->GetLastInstruction());
+ current_block_->AddInstruction(new (arena_) HDiv(type, first, second, dex_pc));
+ UpdateLocal(out_vreg, current_block_->GetLastInstruction());
}
void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_put,
Primitive::Type anticipated_type) {
uint8_t source_or_dest_reg = instruction.VRegA_23x();
@@ -569,10 +610,10 @@ void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
uint8_t index_reg = instruction.VRegC_23x();
// We need one temporary for the null check, one for the index, and one for the length.
- Temporaries temps(graph_, 3);
+ Temporaries temps(graph_);
HInstruction* object = LoadLocal(array_reg, Primitive::kPrimNot);
- object = new (arena_) HNullCheck(object, dex_offset);
+ object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
temps.Add(object);
@@ -580,28 +621,28 @@ void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
current_block_->AddInstruction(length);
temps.Add(length);
HInstruction* index = LoadLocal(index_reg, Primitive::kPrimInt);
- index = new (arena_) HBoundsCheck(index, length, dex_offset);
+ index = new (arena_) HBoundsCheck(index, length, dex_pc);
current_block_->AddInstruction(index);
temps.Add(index);
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type);
// TODO: Insert a type check node if the type is Object.
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, anticipated_type, dex_offset));
+ object, index, value, anticipated_type, dex_pc));
} else {
current_block_->AddInstruction(new (arena_) HArrayGet(object, index, anticipated_type));
UpdateLocal(source_or_dest_reg, current_block_->GetLastInstruction());
}
}
-void HGraphBuilder::BuildFilledNewArray(uint32_t dex_offset,
+void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
uint32_t type_index,
uint32_t number_of_vreg_arguments,
bool is_range,
uint32_t* args,
uint32_t register_index) {
HInstruction* length = GetIntConstant(number_of_vreg_arguments);
- HInstruction* object = new (arena_) HNewArray(length, dex_offset, type_index);
+ HInstruction* object = new (arena_) HNewArray(length, dex_pc, type_index);
current_block_->AddInstruction(object);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
@@ -613,13 +654,13 @@ void HGraphBuilder::BuildFilledNewArray(uint32_t dex_offset,
bool is_reference_array = (primitive == 'L') || (primitive == '[');
Primitive::Type type = is_reference_array ? Primitive::kPrimNot : Primitive::kPrimInt;
- Temporaries temps(graph_, 1);
+ Temporaries temps(graph_);
temps.Add(object);
for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type);
HInstruction* index = GetIntConstant(i);
current_block_->AddInstruction(
- new (arena_) HArraySet(object, index, value, type, dex_offset));
+ new (arena_) HArraySet(object, index, value, type, dex_pc));
}
latest_result_ = object;
}
@@ -629,26 +670,26 @@ void HGraphBuilder::BuildFillArrayData(HInstruction* object,
const T* data,
uint32_t element_count,
Primitive::Type anticipated_type,
- uint32_t dex_offset) {
+ uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = GetIntConstant(i);
HInstruction* value = GetIntConstant(data[i]);
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, anticipated_type, dex_offset));
+ object, index, value, anticipated_type, dex_pc));
}
}
-void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset) {
- Temporaries temps(graph_, 1);
+void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc) {
+ Temporaries temps(graph_);
HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot);
- HNullCheck* null_check = new (arena_) HNullCheck(array, dex_offset);
+ HNullCheck* null_check = new (arena_) HNullCheck(array, dex_pc);
current_block_->AddInstruction(null_check);
temps.Add(null_check);
HInstruction* length = new (arena_) HArrayLength(null_check);
current_block_->AddInstruction(length);
- int32_t payload_offset = instruction.VRegB_31t() + dex_offset;
+ int32_t payload_offset = instruction.VRegB_31t() + dex_pc;
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(code_start_ + payload_offset);
const uint8_t* data = payload->data;
@@ -657,7 +698,7 @@ void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t
// Implementation of this DEX instruction seems to be that the bounds check is
// done before doing any stores.
HInstruction* last_index = GetIntConstant(payload->element_count - 1);
- current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_offset));
+ current_block_->AddInstruction(new (arena_) HBoundsCheck(last_index, length, dex_pc));
switch (payload->element_width) {
case 1:
@@ -665,27 +706,27 @@ void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t
reinterpret_cast<const int8_t*>(data),
element_count,
Primitive::kPrimByte,
- dex_offset);
+ dex_pc);
break;
case 2:
BuildFillArrayData(null_check,
reinterpret_cast<const int16_t*>(data),
element_count,
Primitive::kPrimShort,
- dex_offset);
+ dex_pc);
break;
case 4:
BuildFillArrayData(null_check,
reinterpret_cast<const int32_t*>(data),
element_count,
Primitive::kPrimInt,
- dex_offset);
+ dex_pc);
break;
case 8:
BuildFillWideArrayData(null_check,
reinterpret_cast<const int64_t*>(data),
element_count,
- dex_offset);
+ dex_pc);
break;
default:
LOG(FATAL) << "Unknown element width for " << payload->element_width;
@@ -695,24 +736,56 @@ void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t
void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
const int64_t* data,
uint32_t element_count,
- uint32_t dex_offset) {
+ uint32_t dex_pc) {
for (uint32_t i = 0; i < element_count; ++i) {
HInstruction* index = GetIntConstant(i);
HInstruction* value = GetLongConstant(data[i]);
current_block_->AddInstruction(new (arena_) HArraySet(
- object, index, value, Primitive::kPrimLong, dex_offset));
+ object, index, value, Primitive::kPrimLong, dex_pc));
}
}
-void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset) {
+bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_pc) {
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
+ HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc);
+ current_block_->AddInstruction(cls);
+ // The class needs a temporary before being used by the type check.
+ Temporaries temps(graph_);
+ temps.Add(cls);
+ if (instruction.Opcode() == Instruction::INSTANCE_OF) {
+ current_block_->AddInstruction(
+ new (arena_) HInstanceOf(object, cls, type_known_final, dex_pc));
+ UpdateLocal(destination, current_block_->GetLastInstruction());
+ } else {
+ DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
+ current_block_->AddInstruction(
+ new (arena_) HCheckCast(object, cls, type_known_final, dex_pc));
+ }
+ return true;
+}
+
+void HGraphBuilder::PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_pc) {
if (target_offset <= 0) {
// Unconditionnally add a suspend check to backward branches. We can remove
// them after we recognize loops in the graph.
- current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_offset));
+ current_block_->AddInstruction(new (arena_) HSuspendCheck(dex_pc));
}
}
-bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset) {
+bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc) {
if (current_block_ == nullptr) {
return true; // Dead code
}
@@ -815,8 +888,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
#define IF_XX(comparison, cond) \
- case Instruction::IF_##cond: If_22t<comparison>(instruction, dex_offset); break; \
- case Instruction::IF_##cond##Z: If_21t<comparison>(instruction, dex_offset); break
+ case Instruction::IF_##cond: If_22t<comparison>(instruction, dex_pc); break; \
+ case Instruction::IF_##cond##Z: If_21t<comparison>(instruction, dex_pc); break
IF_XX(HEqual, EQ);
IF_XX(HNotEqual, NE);
@@ -829,8 +902,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
int32_t offset = instruction.GetTargetOffset();
- PotentiallyAddSuspendCheck(offset, dex_offset);
- HBasicBlock* target = FindBlockStartingAt(offset + dex_offset);
+ PotentiallyAddSuspendCheck(offset, dex_pc);
+ HBasicBlock* target = FindBlockStartingAt(offset + dex_pc);
DCHECK(target != nullptr);
current_block_->AddInstruction(new (arena_) HGoto());
current_block_->AddSuccessor(target);
@@ -858,29 +931,31 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
- case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_DIRECT:
- case Instruction::INVOKE_VIRTUAL:
- case Instruction::INVOKE_INTERFACE: {
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_SUPER:
+ case Instruction::INVOKE_VIRTUAL: {
uint32_t method_idx = instruction.VRegB_35c();
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
- if (!BuildInvoke(instruction, dex_offset, method_idx,
+ if (!BuildInvoke(instruction, dex_pc, method_idx,
number_of_vreg_arguments, false, args, -1)) {
return false;
}
break;
}
- case Instruction::INVOKE_STATIC_RANGE:
case Instruction::INVOKE_DIRECT_RANGE:
- case Instruction::INVOKE_VIRTUAL_RANGE:
- case Instruction::INVOKE_INTERFACE_RANGE: {
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ case Instruction::INVOKE_STATIC_RANGE:
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
uint32_t method_idx = instruction.VRegB_3rc();
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
- if (!BuildInvoke(instruction, dex_offset, method_idx,
+ if (!BuildInvoke(instruction, dex_pc, method_idx,
number_of_vreg_arguments, true, nullptr, register_index)) {
return false;
}
@@ -922,6 +997,21 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::LONG_TO_INT: {
+ Conversion_12x(instruction, Primitive::kPrimLong, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::INT_TO_BYTE: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimByte);
+ break;
+ }
+
+ case Instruction::INT_TO_CHAR: {
+ Conversion_12x(instruction, Primitive::kPrimInt, Primitive::kPrimChar);
+ break;
+ }
+
case Instruction::ADD_INT: {
Binop_23x<HAdd>(instruction, Primitive::kPrimInt);
break;
@@ -989,17 +1079,53 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::DIV_INT: {
BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
- dex_offset, Primitive::kPrimInt, false);
+ dex_pc, Primitive::kPrimInt, false);
+ break;
+ }
+
+ case Instruction::DIV_LONG: {
+ BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
+ dex_pc, Primitive::kPrimLong, false);
break;
}
case Instruction::DIV_FLOAT: {
- Binop_23x<HDiv>(instruction, Primitive::kPrimFloat);
+ Binop_23x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::DIV_DOUBLE: {
- Binop_23x<HDiv>(instruction, Primitive::kPrimDouble);
+ Binop_23x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc);
+ break;
+ }
+
+ case Instruction::AND_INT: {
+ Binop_23x<HAnd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::AND_LONG: {
+ Binop_23x<HAnd>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::OR_INT: {
+ Binop_23x<HOr>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::OR_LONG: {
+ Binop_23x<HOr>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::XOR_INT: {
+ Binop_23x<HXor>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::XOR_LONG: {
+ Binop_23x<HXor>(instruction, Primitive::kPrimLong);
break;
}
@@ -1060,17 +1186,53 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::DIV_INT_2ADDR: {
BuildCheckedDiv(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
- dex_offset, Primitive::kPrimInt, false);
+ dex_pc, Primitive::kPrimInt, false);
+ break;
+ }
+
+ case Instruction::DIV_LONG_2ADDR: {
+ BuildCheckedDiv(instruction.VRegA(), instruction.VRegA(), instruction.VRegB(),
+ dex_pc, Primitive::kPrimLong, false);
break;
}
case Instruction::DIV_FLOAT_2ADDR: {
- Binop_12x<HDiv>(instruction, Primitive::kPrimFloat);
+ Binop_12x<HDiv>(instruction, Primitive::kPrimFloat, dex_pc);
break;
}
case Instruction::DIV_DOUBLE_2ADDR: {
- Binop_12x<HDiv>(instruction, Primitive::kPrimDouble);
+ Binop_12x<HDiv>(instruction, Primitive::kPrimDouble, dex_pc);
+ break;
+ }
+
+ case Instruction::AND_INT_2ADDR: {
+ Binop_12x<HAnd>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::AND_LONG_2ADDR: {
+ Binop_12x<HAnd>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::OR_INT_2ADDR: {
+ Binop_12x<HOr>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::OR_LONG_2ADDR: {
+ Binop_12x<HOr>(instruction, Primitive::kPrimLong);
+ break;
+ }
+
+ case Instruction::XOR_INT_2ADDR: {
+ Binop_12x<HXor>(instruction, Primitive::kPrimInt);
+ break;
+ }
+
+ case Instruction::XOR_LONG_2ADDR: {
+ Binop_12x<HXor>(instruction, Primitive::kPrimLong);
break;
}
@@ -1079,6 +1241,21 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::AND_INT_LIT16: {
+ Binop_22s<HAnd>(instruction, false);
+ break;
+ }
+
+ case Instruction::OR_INT_LIT16: {
+ Binop_22s<HOr>(instruction, false);
+ break;
+ }
+
+ case Instruction::XOR_INT_LIT16: {
+ Binop_22s<HXor>(instruction, false);
+ break;
+ }
+
case Instruction::RSUB_INT: {
Binop_22s<HSub>(instruction, true);
break;
@@ -1094,6 +1271,21 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::AND_INT_LIT8: {
+ Binop_22b<HAnd>(instruction, false);
+ break;
+ }
+
+ case Instruction::OR_INT_LIT8: {
+ Binop_22b<HOr>(instruction, false);
+ break;
+ }
+
+ case Instruction::XOR_INT_LIT8: {
+ Binop_22b<HXor>(instruction, false);
+ break;
+ }
+
case Instruction::RSUB_INT_LIT8: {
Binop_22b<HSub>(instruction, true);
break;
@@ -1107,13 +1299,13 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::DIV_INT_LIT16:
case Instruction::DIV_INT_LIT8: {
BuildCheckedDiv(instruction.VRegA(), instruction.VRegB(), instruction.VRegC(),
- dex_offset, Primitive::kPrimInt, true);
+ dex_pc, Primitive::kPrimInt, true);
break;
}
case Instruction::NEW_INSTANCE: {
current_block_->AddInstruction(
- new (arena_) HNewInstance(dex_offset, instruction.VRegB_21c()));
+ new (arena_) HNewInstance(dex_pc, instruction.VRegB_21c()));
UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
break;
}
@@ -1121,7 +1313,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::NEW_ARRAY: {
HInstruction* length = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimInt);
current_block_->AddInstruction(
- new (arena_) HNewArray(length, dex_offset, instruction.VRegC_22c()));
+ new (arena_) HNewArray(length, dex_pc, instruction.VRegC_22c()));
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
break;
}
@@ -1131,7 +1323,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
uint32_t type_index = instruction.VRegB_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
- BuildFilledNewArray(dex_offset, type_index, number_of_vreg_arguments, false, args, 0);
+ BuildFilledNewArray(dex_pc, type_index, number_of_vreg_arguments, false, args, 0);
break;
}
@@ -1140,12 +1332,12 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
uint32_t type_index = instruction.VRegB_3rc();
uint32_t register_index = instruction.VRegC_3rc();
BuildFilledNewArray(
- dex_offset, type_index, number_of_vreg_arguments, true, nullptr, register_index);
+ dex_pc, type_index, number_of_vreg_arguments, true, nullptr, register_index);
break;
}
case Instruction::FILL_ARRAY_DATA: {
- BuildFillArrayData(instruction, dex_offset);
+ BuildFillArrayData(instruction, dex_pc);
break;
}
@@ -1171,7 +1363,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::IGET_BYTE:
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
- if (!BuildInstanceFieldAccess(instruction, dex_offset, false)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, false)) {
return false;
}
break;
@@ -1184,7 +1376,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::IPUT_BYTE:
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
- if (!BuildInstanceFieldAccess(instruction, dex_offset, true)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, true)) {
return false;
}
break;
@@ -1197,7 +1389,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
- if (!BuildStaticFieldAccess(instruction, dex_offset, false)) {
+ if (!BuildStaticFieldAccess(instruction, dex_pc, false)) {
return false;
}
break;
@@ -1210,7 +1402,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
- if (!BuildStaticFieldAccess(instruction, dex_offset, true)) {
+ if (!BuildStaticFieldAccess(instruction, dex_pc, true)) {
return false;
}
break;
@@ -1218,11 +1410,11 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
#define ARRAY_XX(kind, anticipated_type) \
case Instruction::AGET##kind: { \
- BuildArrayAccess(instruction, dex_offset, false, anticipated_type); \
+ BuildArrayAccess(instruction, dex_pc, false, anticipated_type); \
break; \
} \
case Instruction::APUT##kind: { \
- BuildArrayAccess(instruction, dex_offset, true, anticipated_type); \
+ BuildArrayAccess(instruction, dex_pc, true, anticipated_type); \
break; \
}
@@ -1238,7 +1430,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
HInstruction* object = LoadLocal(instruction.VRegB_12x(), Primitive::kPrimNot);
// No need for a temporary for the null check, it is the only input of the following
// instruction.
- object = new (arena_) HNullCheck(object, dex_offset);
+ object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
current_block_->AddInstruction(new (arena_) HArrayLength(object));
UpdateLocal(instruction.VRegA_12x(), current_block_->GetLastInstruction());
@@ -1246,13 +1438,13 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::CONST_STRING: {
- current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_offset));
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING_JUMBO: {
- current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_offset));
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_pc));
UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
break;
}
@@ -1269,7 +1461,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
return false;
}
current_block_->AddInstruction(
- new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset));
+ new (arena_) HLoadClass(type_index, is_referrers_class, dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
@@ -1282,7 +1474,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::THROW: {
HInstruction* exception = LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot);
- current_block_->AddInstruction(new (arena_) HThrow(exception, dex_offset));
+ current_block_->AddInstruction(new (arena_) HThrow(exception, dex_pc));
// A throw instruction must branch to the exit block.
current_block_->AddSuccessor(exit_block_);
// We finished building this block. Set the current block to null to avoid
@@ -1292,25 +1484,37 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::INSTANCE_OF: {
+ uint8_t destination = instruction.VRegA_22c();
+ uint8_t reference = instruction.VRegB_22c();
uint16_t type_index = instruction.VRegC_22c();
- bool type_known_final;
- bool type_known_abstract;
- bool is_referrers_class;
- bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
- dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
- &type_known_final, &type_known_abstract, &is_referrers_class);
- if (!can_access) {
+ if (!BuildTypeCheck(instruction, destination, reference, type_index, dex_pc)) {
return false;
}
- HInstruction* object = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimNot);
- HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset);
- current_block_->AddInstruction(cls);
- // The class needs a temporary before being used by the type check.
- Temporaries temps(graph_, 1);
- temps.Add(cls);
- current_block_->AddInstruction(
- new (arena_) HTypeCheck(object, cls, type_known_final, dex_offset));
- UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::CHECK_CAST: {
+ uint8_t reference = instruction.VRegA_21c();
+ uint16_t type_index = instruction.VRegB_21c();
+ if (!BuildTypeCheck(instruction, -1, reference, type_index, dex_pc)) {
+ return false;
+ }
+ break;
+ }
+
+ case Instruction::MONITOR_ENTER: {
+ current_block_->AddInstruction(new (arena_) HMonitorOperation(
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ HMonitorOperation::kEnter,
+ dex_pc));
+ break;
+ }
+
+ case Instruction::MONITOR_EXIT: {
+ current_block_->AddInstruction(new (arena_) HMonitorOperation(
+ LoadLocal(instruction.VRegA_11x(), Primitive::kPrimNot),
+ HMonitorOperation::kExit,
+ dex_pc));
break;
}
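One subtlety in the new BuildCheckedDiv above is when the HDivZeroCheck gets emitted: a divisor loaded from a register always gets the check, a non-zero constant divisor gets none, and a constant zero keeps the check so the runtime ArithmeticException still fires. Condensed, using the names from the hunk:
// Sketch of the zero-check decision (same logic as above, just factored out).
bool needs_zero_check =
    !second_is_constant
    || (type == Primitive::kPrimInt && second->AsIntConstant()->GetValue() == 0)
    || (type == Primitive::kPrimLong && second->AsLongConstant()->GetValue() == 0);
if (needs_zero_check) {
  second = new (arena_) HDivZeroCheck(second, dex_pc);
}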
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 09c9a51260..799e628a78 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -76,7 +76,7 @@ class HGraphBuilder : public ValueObject {
// Analyzes the dex instruction and adds HInstruction to the graph
// to execute that instruction. Returns whether the instruction can
// be handled.
- bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_offset);
+ bool AnalyzeDexInstruction(const Instruction& instruction, uint32_t dex_pc);
// Finds all instructions that start a new block, and populates branch_targets_ with
// the newly created blocks.
@@ -92,7 +92,7 @@ class HGraphBuilder : public ValueObject {
HLocal* GetLocalAt(int register_index) const;
void UpdateLocal(int register_index, HInstruction* instruction) const;
HInstruction* LoadLocal(int register_index, Primitive::Type type) const;
- void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset);
+ void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_pc);
void InitializeParameters(uint16_t number_of_parameters);
template<typename T>
@@ -102,16 +102,22 @@ class HGraphBuilder : public ValueObject {
void Binop_23x(const Instruction& instruction, Primitive::Type type);
template<typename T>
+ void Binop_23x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
+
+ template<typename T>
void Binop_12x(const Instruction& instruction, Primitive::Type type);
template<typename T>
+ void Binop_12x(const Instruction& instruction, Primitive::Type type, uint32_t dex_pc);
+
+ template<typename T>
void Binop_22b(const Instruction& instruction, bool reverse);
template<typename T>
void Binop_22s(const Instruction& instruction, bool reverse);
- template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_offset);
- template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_offset);
+ template<typename T> void If_21t(const Instruction& instruction, uint32_t dex_pc);
+ template<typename T> void If_22t(const Instruction& instruction, uint32_t dex_pc);
void Conversion_12x(const Instruction& instruction,
Primitive::Type input_type,
@@ -119,27 +125,27 @@ class HGraphBuilder : public ValueObject {
void BuildCheckedDiv(uint16_t out_reg,
uint16_t first_reg,
- int32_t second_reg, // can be a constant
- uint32_t dex_offset,
+ int64_t second_reg_or_constant,
+ uint32_t dex_pc,
Primitive::Type type,
bool second_is_lit);
void BuildReturn(const Instruction& instruction, Primitive::Type type);
// Builds an instance field access node and returns whether the instruction is supported.
- bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_offset, bool is_put);
+ bool BuildInstanceFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
// Builds a static field access node and returns whether the instruction is supported.
- bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_offset, bool is_put);
+ bool BuildStaticFieldAccess(const Instruction& instruction, uint32_t dex_pc, bool is_put);
void BuildArrayAccess(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
bool is_get,
Primitive::Type anticipated_type);
// Builds an invocation node and returns whether the instruction is supported.
bool BuildInvoke(const Instruction& instruction,
- uint32_t dex_offset,
+ uint32_t dex_pc,
uint32_t method_idx,
uint32_t number_of_vreg_arguments,
bool is_range,
@@ -147,14 +153,14 @@ class HGraphBuilder : public ValueObject {
uint32_t register_index);
// Builds a new array node and the instructions that fill it.
- void BuildFilledNewArray(uint32_t dex_offset,
+ void BuildFilledNewArray(uint32_t dex_pc,
uint32_t type_index,
uint32_t number_of_vreg_arguments,
bool is_range,
uint32_t* args,
uint32_t register_index);
- void BuildFillArrayData(const Instruction& instruction, uint32_t dex_offset);
+ void BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc);
// Fills the given object with data as specified in the fill-array-data
// instruction. Currently only used for non-reference and non-floating point
@@ -164,14 +170,22 @@ class HGraphBuilder : public ValueObject {
const T* data,
uint32_t element_count,
Primitive::Type anticipated_type,
- uint32_t dex_offset);
+ uint32_t dex_pc);
// Fills the given object with data as specified in the fill-array-data
// instruction. The data must be for long and double arrays.
void BuildFillWideArrayData(HInstruction* object,
const int64_t* data,
uint32_t element_count,
- uint32_t dex_offset);
+ uint32_t dex_pc);
+
+ // Builds a `HInstanceOf`, or a `HCheckCast` instruction.
+ // Returns whether we succeeded in building the instruction.
+ bool BuildTypeCheck(const Instruction& instruction,
+ uint8_t destination,
+ uint8_t reference,
+ uint16_t type_index,
+ uint32_t dex_pc);
ArenaAllocator* const arena_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 9d172638e1..4d71cb780a 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -51,7 +51,7 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
MarkNotLeaf();
}
ComputeFrameSize(GetGraph()->GetNumberOfLocalVRegs()
- + GetGraph()->GetNumberOfTemporaries()
+ + GetGraph()->GetTemporariesVRegSlots()
+ 1 /* filler */,
0, /* the baseline compiler does not have live registers at slow path */
GetGraph()->GetMaximumNumberOfOutVRegs()
@@ -150,12 +150,15 @@ void CodeGenerator::ComputeFrameSize(size_t number_of_spill_slots,
Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
+ // The type of the previous instruction tells us if we need a single or double stack slot.
+ Primitive::Type type = temp->GetType();
+ int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
// Use the temporary region (right below the dex registers).
int32_t slot = GetFrameSize() - FrameEntrySpillSize()
- kVRegSize // filler
- (number_of_locals * kVRegSize)
- - ((1 + temp->GetIndex()) * kVRegSize);
- return Location::StackSlot(slot);
+ - ((temp_size + temp->GetIndex()) * kVRegSize);
+ return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
}
int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
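The temporary slot computation above now depends on the temporary's type, so a worked example with hypothetical numbers may help: assume a 64-byte frame, an 8-byte entry spill area, 3 local vregs, kVRegSize == 4, and a long temporary whose index is 2.
// slot = 64            // GetFrameSize()
//      - 8             // FrameEntrySpillSize()
//      - 4             // filler
//      - 3 * 4         // locals
//      - (2 + 2) * 4   // (temp_size + temp->GetIndex()) * kVRegSize
//      = 24
// The result is Location::DoubleStackSlot(24), spanning byte offsets 24-31.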
@@ -632,4 +635,13 @@ void CodeGenerator::ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend
}
}
+void CodeGenerator::EmitParallelMoves(Location from1, Location to1, Location from2, Location to2) {
+ MoveOperands move1(from1, to1, nullptr);
+ MoveOperands move2(from2, to2, nullptr);
+ HParallelMove parallel_move(GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ GetMoveResolver()->EmitNativeCode(&parallel_move);
+}
+
} // namespace art
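EmitParallelMoves exists because slow paths move two values into fixed calling-convention registers, and either source may already occupy the other's destination; routing both moves through one HParallelMove lets the resolver order them (or go through a scratch register) rather than clobbering an input. The ARM bounds-check slow path later in this diff uses it like so:
InvokeRuntimeCallingConvention calling_convention;
codegen->EmitParallelMoves(
    index_location_,
    Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
    length_location_,
    Location::RegisterLocation(calling_convention.GetRegisterAt(1)));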
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index fc4ea4b5d3..63bf96ca5a 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -17,9 +17,9 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
+#include "arch/instruction_set.h"
#include "base/bit_field.h"
#include "globals.h"
-#include "instruction_set.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
@@ -33,6 +33,7 @@ static size_t constexpr kUninitializedFrameSize = 0;
class Assembler;
class CodeGenerator;
class DexCompilationUnit;
+class ParallelMoveResolver;
class SrcMap;
class CodeAllocator {
@@ -165,6 +166,8 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
// of the architecture.
static size_t GetCacheOffset(uint32_t index);
+ void EmitParallelMoves(Location from1, Location to1, Location from2, Location to2);
+
protected:
CodeGenerator(HGraph* graph,
size_t number_of_core_registers,
@@ -197,6 +200,8 @@ class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
+ virtual ParallelMoveResolver* GetMoveResolver() = 0;
+
// Frame size required for this method.
uint32_t frame_size_;
uint32_t core_spill_mask_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 467c2a6c29..09e1b97570 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -22,9 +22,9 @@
#include "mirror/art_method.h"
#include "mirror/class.h"
#include "thread.h"
-#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
#include "utils/stack_checks.h"
namespace art {
@@ -41,7 +41,7 @@ static constexpr bool kExplicitStackOverflowCheck = false;
static constexpr int kNumberOfPushedRegistersAtEntry = 1 + 2; // LR, R6, R7
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2 };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2, R3 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
static constexpr SRegister kRuntimeParameterFpuRegisters[] = { };
@@ -169,11 +169,14 @@ class BoundsCheckSlowPathARM : public SlowPathCodeARM {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- arm_codegen->Move32(
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- arm_codegen->Move32(
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
arm_codegen->InvokeRuntime(
QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc());
}
@@ -269,13 +272,19 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
class TypeCheckSlowPathARM : public SlowPathCodeARM {
public:
- explicit TypeCheckSlowPathARM(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathARM(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
@@ -284,28 +293,29 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- arm_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-
- arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, instruction_->GetDexPc());
- arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc_);
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_);
+ }
codegen->RestoreLiveRegisters(locations);
__ b(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
@@ -660,13 +670,13 @@ void CodeGeneratorARM::Move32(Location destination, Location source) {
__ LoadSFromOffset(destination.As<SRegister>(), SP, source.GetStackIndex());
}
} else {
- DCHECK(destination.IsStackSlot());
+ DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
__ StoreToOffset(kStoreWord, source.As<Register>(), SP, destination.GetStackIndex());
} else if (source.IsFpuRegister()) {
__ StoreSToOffset(source.As<SRegister>(), SP, destination.GetStackIndex());
} else {
- DCHECK(source.IsStackSlot());
+ DCHECK(source.IsStackSlot()) << source;
__ LoadFromOffset(kLoadWord, IP, SP, source.GetStackIndex());
__ StoreToOffset(kStoreWord, IP, SP, destination.GetStackIndex());
}
@@ -768,26 +778,29 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr
return;
}
- if (instruction->IsIntConstant()) {
- int32_t value = instruction->AsIntConstant()->GetValue();
- if (location.IsRegister()) {
- __ LoadImmediate(location.As<Register>(), value);
- } else {
- DCHECK(location.IsStackSlot());
- __ LoadImmediate(IP, value);
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegisterPair()) {
- __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
- __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
- } else {
- DCHECK(location.IsDoubleStackSlot());
- __ LoadImmediate(IP, Low32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- __ LoadImmediate(IP, High32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ int32_t value = const_to_move->AsIntConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ LoadImmediate(location.As<Register>(), value);
+ } else {
+ DCHECK(location.IsStackSlot());
+ __ LoadImmediate(IP, value);
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegisterPair()) {
+ __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
+ __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
+ } else {
+ DCHECK(location.IsDoubleStackSlot());
+ __ LoadImmediate(IP, Low32Bits(value));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
+ __ LoadImmediate(IP, High32Bits(value));
+ __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
+ }
}
} else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
@@ -812,7 +825,12 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr
}
} else if (instruction->IsTemporary()) {
Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- Move32(location, temp_location);
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
} else {
DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
switch (instruction->GetType()) {
@@ -1333,13 +1351,49 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
@@ -1356,7 +1410,23 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1376,13 +1446,57 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ __ sbfx(out.As<Register>(), in.As<Register>(), 0, 8);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ DCHECK(out.IsRegister());
+ if (in.IsRegisterPair()) {
+ __ Mov(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ LoadFromOffset(kLoadWord, out.As<Register>(), SP, in.GetStackIndex());
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ LoadImmediate(out.As<Register>(), static_cast<int32_t>(value));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
DCHECK(out.IsRegisterPair());
DCHECK(in.IsRegister());
__ Mov(out.AsRegisterPairLow<Register>(), in.As<Register>());
@@ -1404,7 +1518,22 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ __ ubfx(out.As<Register>(), in.As<Register>(), 0, 16);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1636,8 +1765,11 @@ void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
}
void LocationsBuilderARM::VisitDiv(HDiv* div) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RequiresRegister());
@@ -1646,7 +1778,13 @@ void LocationsBuilderARM::VisitDiv(HDiv* div) {
break;
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // The runtime helper puts the output in R0,R2.
+ locations->SetOut(Location::RegisterPairLocation(R0, R2));
break;
}
case Primitive::kPrimFloat:
@@ -1675,7 +1813,15 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(R0, out.AsRegisterPairLow<Register>());
+ DCHECK_EQ(R2, out.AsRegisterPairHigh<Register>());
+
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pLdiv), div, div->GetDexPc());
break;
}
@@ -1699,7 +1845,7 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
@@ -1712,9 +1858,36 @@ void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction)
LocationSummary* locations = instruction->GetLocations();
Location value = locations->InAt(0);
- DCHECK(value.IsRegister()) << value;
- __ cmp(value.As<Register>(), ShifterOperand(0));
- __ b(slow_path->GetEntryLabel(), EQ);
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ cmp(value.As<Register>(), ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ b(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
+ __ orrs(IP,
+ value.AsRegisterPairLow<Register>(),
+ ShifterOperand(value.AsRegisterPairHigh<Register>()));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ b(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
}
void LocationsBuilderARM::VisitNewInstance(HNewInstance* instruction) {
@@ -2658,7 +2831,7 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
}
-void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2668,7 +2841,7 @@ void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).As<Register>();
Register cls = locations->InAt(1).As<Register>();
@@ -2693,7 +2866,7 @@ void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
- instruction, Location::RegisterLocation(out));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ b(slow_path->GetEntryLabel(), NE);
__ LoadImmediate(out, 1);
@@ -2707,5 +2880,121 @@ void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
__ Bind(&done);
}
+void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(1).As<Register>();
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ cmp(obj, ShifterOperand(0));
+ __ b(slow_path->GetExitLabel(), EQ);
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, temp, obj, class_offset);
+ __ cmp(temp, ShifterOperand(cls));
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderARM::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instruction) {
+ codegen_->InvokeRuntime(instruction->IsEnter()
+ ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
+ instruction,
+ instruction->GetDexPc());
+}
+
+void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ bool output_overlaps = (instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetOut(Location::RequiresRegister(), output_overlaps);
+}
+
+void InstructionCodeGeneratorARM::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ Register first = locations->InAt(0).As<Register>();
+ Register second = locations->InAt(1).As<Register>();
+ Register out = locations->Out().As<Register>();
+ if (instruction->IsAnd()) {
+ __ and_(out, first, ShifterOperand(second));
+ } else if (instruction->IsOr()) {
+ __ orr(out, first, ShifterOperand(second));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ eor(out, first, ShifterOperand(second));
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location out = locations->Out();
+ if (instruction->IsAnd()) {
+ __ and_(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ and_(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ } else if (instruction->IsOr()) {
+ __ orr(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ orr(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ eor(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ eor(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ }
+ }
+}
+
} // namespace arm
} // namespace art
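For readers less familiar with the ARM bit-field instructions used above: sbfx out, in, #0, #8 (int-to-byte) sign-extends the low byte, ubfx out, in, #0, #16 (int-to-char) zero-extends the low half-word, and the orrs in VisitDivZeroCheck sets the flags on the OR of the two halves so that EQ means the 64-bit value is zero. Below is a minimal C++ sketch of those semantics; it is illustrative only, not part of the patch, and the helper names are made up.

#include <cassert>
#include <cstdint>

// Semantics of `sbfx out, in, #0, #8': sign-extend the low 8 bits (two's complement).
int32_t IntToByte(int32_t in) { return static_cast<int8_t>(in); }

// Semantics of `ubfx out, in, #0, #16': zero-extend the low 16 bits.
int32_t IntToChar(int32_t in) { return static_cast<uint16_t>(in); }

// Semantics of `orrs ip, lo, hi': the 64-bit value is zero iff (lo | hi) == 0.
bool LongIsZero(uint32_t lo, uint32_t hi) { return (lo | hi) == 0; }

int main() {
  assert(IntToByte(0x1FF) == -1);    // low byte 0xFF sign-extends to -1
  assert(IntToChar(-1) == 0xFFFF);   // low 16 bits, zero-extended
  assert(LongIsZero(0u, 0u) && !LongIsZero(0u, 1u));
  return 0;
}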
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 5d519937f4..acc3fd6a25 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -105,9 +105,10 @@ class LocationsBuilderARM : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ private:
void HandleInvoke(HInvoke* invoke);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
- private:
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -133,6 +134,7 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
ArmAssembler* const assembler_;
CodeGeneratorARM* const codegen_;
@@ -186,7 +188,7 @@ class CodeGeneratorARM : public CodeGenerator {
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverARM* GetMoveResolver() {
+ ParallelMoveResolverARM* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 4dc836f412..887a4efa19 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -631,18 +631,23 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
codegen_(codegen) {}
#define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
+ M(And) \
+ M(CheckCast) \
M(ClinitCheck) \
M(DivZeroCheck) \
+ M(InstanceOf) \
M(InvokeInterface) \
M(LoadClass) \
M(LoadException) \
M(LoadString) \
+ M(MonitorOperation) \
+ M(Or) \
M(ParallelMove) \
M(StaticFieldGet) \
M(StaticFieldSet) \
M(Throw) \
- M(TypeCheck) \
M(TypeConversion) \
+ M(Xor) \
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f2ead21e15..54e87f4d9c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -230,6 +230,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
void Load(Primitive::Type type, vixl::Register dst, const vixl::MemOperand& src);
void Store(Primitive::Type type, vixl::Register rt, const vixl::MemOperand& dst);
+ ParallelMoveResolver* GetMoveResolver() OVERRIDE {
+ UNIMPLEMENTED(INFO) << "TODO: MoveResolver";
+ return nullptr;
+ }
+
private:
// Labels for each block that will be compiled.
vixl::Label* block_labels_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d66180be32..8a8fec2609 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -36,7 +36,7 @@ static constexpr bool kExplicitStackOverflowCheck = false;
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX, EBX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
static constexpr XmmRegister kRuntimeParameterFpuRegisters[] = { };
@@ -140,9 +140,14 @@ class BoundsCheckSlowPathX86 : public SlowPathCodeX86 {
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x86_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ x86_codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
}
@@ -270,13 +275,19 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
public:
- TypeCheckSlowPathX86(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathX86(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
@@ -285,28 +296,33 @@ class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- x86_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ x86_codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pCheckCast)));
+ }
+
+ codegen->RecordPcInfo(instruction_, dex_pc_);
+ if (instruction_->IsInstanceOf()) {
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ }
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
};
@@ -559,7 +575,7 @@ void CodeGeneratorX86::Move32(Location destination, Location source) {
__ movss(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
}
} else {
- DCHECK(destination.IsStackSlot());
+ DCHECK(destination.IsStackSlot()) << destination;
if (source.IsRegister()) {
__ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
} else if (source.IsFpuRegister()) {
@@ -620,7 +636,7 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
LOG(FATAL) << "Unimplemented";
}
} else {
- DCHECK(destination.IsDoubleStackSlot());
+ DCHECK(destination.IsDoubleStackSlot()) << destination;
if (source.IsRegisterPair()) {
__ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow<Register>());
__ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
@@ -646,31 +662,44 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
}
void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
- if (instruction->IsIntConstant()) {
- Immediate imm(instruction->AsIntConstant()->GetValue());
- if (location.IsRegister()) {
- __ movl(location.As<Register>(), imm);
- } else if (location.IsStackSlot()) {
- __ movl(Address(ESP, location.GetStackIndex()), imm);
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
- __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
- } else if (location.IsDoubleStackSlot()) {
- __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
- __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ Immediate imm(const_to_move->AsIntConstant()->GetValue());
+ if (location.IsRegister()) {
+ __ movl(location.As<Register>(), imm);
+ } else if (location.IsStackSlot()) {
+ __ movl(Address(ESP, location.GetStackIndex()), imm);
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegisterPair()) {
+ __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
+ __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
+ } else if (location.IsDoubleStackSlot()) {
+ __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
+ __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), instruction);
+ }
}
} else if (instruction->IsTemporary()) {
Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- Move32(location, temp_location);
+ if (temp_location.IsStackSlot()) {
+ Move32(location, temp_location);
+ } else {
+ DCHECK(temp_location.IsDoubleStackSlot());
+ Move64(location, temp_location);
+ }
} else if (instruction->IsLoadLocal()) {
int slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
switch (instruction->GetType()) {
@@ -702,12 +731,12 @@ void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstr
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- Move32(location, instruction->GetLocations()->Out());
+ Move32(location, locations->Out());
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- Move64(location, instruction->GetLocations()->Out());
+ Move64(location, locations->Out());
break;
default:
@@ -1261,13 +1290,49 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
locations->SetInAt(0, Location::RegisterLocation(EAX));
locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
break;
@@ -1284,7 +1349,23 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1304,13 +1385,64 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ if (in.IsRegister()) {
+ __ movsxb(out.As<Register>(), in.As<ByteRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movsxb(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<int8_t>(value)));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ if (in.IsRegisterPair()) {
+ __ movl(out.As<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movl(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<int32_t>(value)));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
DCHECK_EQ(out.AsRegisterPairLow<Register>(), EAX);
DCHECK_EQ(out.AsRegisterPairHigh<Register>(), EDX);
DCHECK_EQ(in.As<Register>(), EAX);
@@ -1329,7 +1461,30 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ if (in.IsRegister()) {
+ __ movzxw(out.As<Register>(), in.As<Register>());
+ } else if (in.IsStackSlot()) {
+ __ movzxw(out.As<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ int32_t value = in.GetConstant()->AsIntConstant()->GetValue();
+ __ movl(out.As<Register>(), Immediate(static_cast<uint16_t>(value)));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1386,7 +1541,7 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
}
case Primitive::kPrimLong: {
- if (second.IsRegister()) {
+ if (second.IsRegisterPair()) {
__ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
@@ -1462,7 +1617,7 @@ void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
}
case Primitive::kPrimLong: {
- if (second.IsRegister()) {
+ if (second.IsRegisterPair()) {
__ subl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
__ sbbl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
@@ -1599,8 +1754,11 @@ void InstructionCodeGeneratorX86::VisitMul(HMul* mul) {
}
void LocationsBuilderX86::VisitDiv(HDiv* div) {
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
+ LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
+ ? LocationSummary::kCall
+ : LocationSummary::kNoCall;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
+
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
locations->SetInAt(0, Location::RegisterLocation(EAX));
@@ -1611,7 +1769,13 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) {
break;
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(1, Location::RegisterPairLocation(
+ calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
+ // Runtime helper puts the result in EAX, EDX.
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
break;
}
case Primitive::kPrimFloat:
@@ -1629,12 +1793,13 @@ void LocationsBuilderX86::VisitDiv(HDiv* div) {
void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
LocationSummary* locations = div->GetLocations();
+ Location out = locations->Out();
Location first = locations->InAt(0);
Location second = locations->InAt(1);
- DCHECK(first.Equals(locations->Out()));
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
+ DCHECK(first.Equals(out));
Register first_reg = first.As<Register>();
Register second_reg = second.As<Register>();
DCHECK_EQ(EAX, first_reg);
@@ -1661,16 +1826,28 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
}
case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), first.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(2), second.AsRegisterPairLow<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(3), second.AsRegisterPairHigh<Register>());
+ DCHECK_EQ(EAX, out.AsRegisterPairLow<Register>());
+ DCHECK_EQ(EDX, out.AsRegisterPairHigh<Register>());
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLdiv)));
+ codegen_->RecordPcInfo(div, div->GetDexPc());
+
break;
}
case Primitive::kPrimFloat: {
+ DCHECK(first.Equals(out));
__ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
}
case Primitive::kPrimDouble: {
+ DCHECK(first.Equals(out));
__ divsd(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
}
@@ -1683,7 +1860,21 @@ void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
- locations->SetInAt(0, Location::Any());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ locations->SetInAt(0, Location::Any());
+ break;
+ }
+ case Primitive::kPrimLong: {
+ locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
+ if (!instruction->InputAt(0)->IsConstant()) {
+ locations->AddTemp(Location::RequiresRegister());
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
+ }
if (instruction->HasUses()) {
locations->SetOut(Location::SameAsFirstInput());
}
@@ -1696,18 +1887,39 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction)
LocationSummary* locations = instruction->GetLocations();
Location value = locations->InAt(0);
- if (value.IsRegister()) {
- __ testl(value.As<Register>(), value.As<Register>());
- } else if (value.IsStackSlot()) {
- __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
- } else {
- DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ testl(value.As<Register>(), value.As<Register>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(ESP, value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
}
- return;
+ case Primitive::kPrimLong: {
+ if (value.IsRegisterPair()) {
+ Register temp = locations->GetTemp(0).As<Register>();
+ __ movl(temp, value.AsRegisterPairLow<Register>());
+ __ orl(temp, value.AsRegisterPairHigh<Register>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck" << instruction->GetType();
}
- __ j(kEqual, slow_path->GetEntryLabel());
}
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
@@ -2753,7 +2965,7 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
-void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2763,7 +2975,7 @@ void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).As<Register>();
Location cls = locations->InAt(1);
@@ -2794,7 +3006,7 @@ void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
- instruction, Location::RegisterLocation(out));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -2808,5 +3020,148 @@ void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
__ Bind(&done);
}
+void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Location cls = locations->InAt(1);
+ Register temp = locations->GetTemp(0).As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ __ movl(temp, Address(obj, class_offset));
+
+ // Compare the class of `obj` with `cls`.
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(ESP, cls.GetStackIndex()));
+ }
+
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instruction) {
+ __ fs()->call(Address::Absolute(instruction->IsEnter()
+ ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pLockObject)
+ : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pUnlockObject)));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ if (second.IsRegister()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), second.As<Register>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), second.As<Register>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), second.As<Register>());
+ }
+ } else if (second.IsConstant()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), Immediate(second.GetConstant()->AsIntConstant()->GetValue()));
+ }
+ } else {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
+ }
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ if (second.IsRegisterPair()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ andl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ orl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ xorl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
+ }
+ } else {
+ if (instruction->IsAnd()) {
+ __ andl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ andl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else if (instruction->IsOr()) {
+ __ orl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ orl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ xorl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
+ }
+ }
+ }
+}
+
} // namespace x86
} // namespace art
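The slow paths above switch from two sequential Move32 calls to EmitParallelMoves because the two argument moves may clobber each other, e.g. when the index already sits in the register that the length must be moved into. A rough sketch of the hazard and the swap-style fix follows; it is illustrative only and does not reflect the actual ParallelMoveResolver implementation.

#include <cassert>
#include <utility>

// Two "registers" standing in for EAX and ECX.
struct Regs { int eax; int ecx; };

// Naive sequential moves: index -> EAX, length -> ECX.
// If the index currently lives in ECX and the length in EAX, the first
// move overwrites EAX before it has been read as the length source.
Regs SequentialMoves(Regs r) {
  r.eax = r.ecx;  // index into EAX (clobbers the length)
  r.ecx = r.eax;  // length into ECX -- now reads the clobbered value
  return r;
}

// A parallel-move resolver reads all sources before writing (here by
// swapping), so overlapping source/destination pairs stay correct.
Regs ParallelMoves(Regs r) {
  std::swap(r.eax, r.ecx);
  return r;
}

int main() {
  Regs in{/*eax=*/42 /*length*/, /*ecx=*/7 /*index*/};
  Regs bad = SequentialMoves(in);
  Regs good = ParallelMoves(in);
  assert(bad.eax == 7 && bad.ecx == 7);     // length lost
  assert(good.eax == 7 && good.ecx == 42);  // both values preserved
  return 0;
}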
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 85fe21ca76..841b28b158 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -100,9 +100,10 @@ class LocationsBuilderX86 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ private:
+ void HandleBitwiseOperation(HBinaryOperation* instruction);
void HandleInvoke(HInvoke* invoke);
- private:
CodeGeneratorX86* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -128,6 +129,7 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCodeX86* slow_path, Register class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* instruction);
X86Assembler* const assembler_;
CodeGeneratorX86* const codegen_;
@@ -181,7 +183,7 @@ class CodeGeneratorX86 : public CodeGenerator {
// Blocks all register pairs made out of blocked core registers.
void UpdateBlockedPairRegisters() const;
- ParallelMoveResolverX86* GetMoveResolver() {
+ ParallelMoveResolverX86* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
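On 32-bit x86 (and ARM) the long division above is routed to the pLdiv runtime helper, so each 64-bit operand is split into low/high 32-bit halves placed in a core register pair of the runtime calling convention (which is why EBX joins kRuntimeParameterCoreRegisters), and the 64-bit result comes back in the EAX:EDX pair. The sketch below shows only that split/join arithmetic; the names RegPair, Split and Join are made up for illustration.

#include <cassert>
#include <cstdint>

// A long travels to the pLdiv helper as two 32-bit halves, low half first,
// and the 64-bit quotient is reassembled from the returned register pair.
struct RegPair { uint32_t lo; uint32_t hi; };

RegPair Split(int64_t value) {
  uint64_t bits = static_cast<uint64_t>(value);
  return { static_cast<uint32_t>(bits), static_cast<uint32_t>(bits >> 32) };
}

int64_t Join(RegPair pair) {
  return static_cast<int64_t>((static_cast<uint64_t>(pair.hi) << 32) | pair.lo);
}

int main() {
  int64_t value = -0x123456789ABCLL;
  assert(Join(Split(value)) == value);  // lossless round trip through a pair
  return 0;
}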
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e09b6cab08..5aa1c4a6c8 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -108,16 +108,23 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
class DivMinusOneSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- explicit DivMinusOneSlowPathX86_64(Register reg) : reg_(reg) {}
+ explicit DivMinusOneSlowPathX86_64(Register reg, Primitive::Type type)
+ : reg_(reg), type_(type) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
__ Bind(GetEntryLabel());
- __ negl(CpuRegister(reg_));
+ if (type_ == Primitive::kPrimInt) {
+ __ negl(CpuRegister(reg_));
+ } else {
+ DCHECK_EQ(Primitive::kPrimLong, type_);
+ __ negq(CpuRegister(reg_));
+ }
__ jmp(GetExitLabel());
}
private:
- Register reg_;
+ const Register reg_;
+ const Primitive::Type type_;
DISALLOW_COPY_AND_ASSIGN(DivMinusOneSlowPathX86_64);
};
@@ -179,13 +186,15 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
length_location_(length_location) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
InvokeRuntimeCallingConvention calling_convention;
- x64_codegen->Move(
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x64_codegen->Move(
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ codegen->EmitParallelMoves(
+ index_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ length_location_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
__ gs()->call(Address::Absolute(
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -284,13 +293,19 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- TypeCheckSlowPathX86_64(HTypeCheck* instruction, Location object_class)
+ TypeCheckSlowPathX86_64(HInstruction* instruction,
+ Location class_to_check,
+ Location object_class,
+ uint32_t dex_pc)
: instruction_(instruction),
- object_class_(object_class) {}
+ class_to_check_(class_to_check),
+ object_class_(object_class),
+ dex_pc_(dex_pc) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ DCHECK(instruction_->IsCheckCast()
+ || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
@@ -299,29 +314,35 @@ class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
// We're moving two locations to locations that could overlap, so we need a parallel
// move resolver.
InvokeRuntimeCallingConvention calling_convention;
- MoveOperands move1(locations->InAt(1),
- Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
- nullptr);
- MoveOperands move2(object_class_,
- Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
- nullptr);
- HParallelMove parallel_move(codegen->GetGraph()->GetArena());
- parallel_move.AddMove(&move1);
- parallel_move.AddMove(&move2);
- x64_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ codegen->EmitParallelMoves(
+ class_to_check_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+
+ if (instruction_->IsInstanceOf()) {
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+ } else {
+ DCHECK(instruction_->IsCheckCast());
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pCheckCast), true));
+ }
+ codegen->RecordPcInfo(instruction_, dex_pc_);
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
- codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ if (instruction_->IsInstanceOf()) {
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ }
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HTypeCheck* const instruction_;
+ HInstruction* const instruction_;
+ const Location class_to_check_;
const Location object_class_;
+ const uint32_t dex_pc_;
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
};
@@ -563,26 +584,34 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) {
void CodeGeneratorX86_64::Move(HInstruction* instruction,
Location location,
HInstruction* move_for) {
- if (instruction->IsIntConstant()) {
- Immediate imm(instruction->AsIntConstant()->GetValue());
- if (location.IsRegister()) {
- __ movl(location.As<CpuRegister>(), imm);
- } else if (location.IsStackSlot()) {
- __ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
- }
- } else if (instruction->IsLongConstant()) {
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ movq(location.As<CpuRegister>(), Immediate(value));
- } else if (location.IsDoubleStackSlot()) {
- __ movq(CpuRegister(TMP), Immediate(value));
- __ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
+ LocationSummary* locations = instruction->GetLocations();
+ if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ }
+
+ if (locations != nullptr && locations->Out().IsConstant()) {
+ HConstant* const_to_move = locations->Out().GetConstant();
+ if (const_to_move->IsIntConstant()) {
+ Immediate imm(const_to_move->AsIntConstant()->GetValue());
+ if (location.IsRegister()) {
+ __ movl(location.As<CpuRegister>(), imm);
+ } else if (location.IsStackSlot()) {
+ __ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
+ } else if (const_to_move->IsLongConstant()) {
+ int64_t value = const_to_move->AsLongConstant()->GetValue();
+ if (location.IsRegister()) {
+ __ movq(location.As<CpuRegister>(), Immediate(value));
+ } else if (location.IsDoubleStackSlot()) {
+ __ movq(CpuRegister(TMP), Immediate(value));
+ __ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
+ } else {
+ DCHECK(location.IsConstant());
+ DCHECK_EQ(location.GetConstant(), const_to_move);
+ }
}
} else if (instruction->IsLoadLocal()) {
switch (instruction->GetType()) {
@@ -619,7 +648,7 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction,
case Primitive::kPrimLong:
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- Move(location, instruction->GetLocations()->Out());
+ Move(location, locations->Out());
break;
default:
@@ -1259,13 +1288,49 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
case Primitive::kPrimByte:
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
// TODO: We would benefit from a (to-be-implemented)
// Location::RegisterOrStackSlot requirement for this input.
locations->SetInAt(0, Location::RequiresRegister());
@@ -1284,7 +1349,23 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ locations->SetInAt(0, Location::Any());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
@@ -1304,6 +1385,59 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
Primitive::Type result_type = conversion->GetResultType();
Primitive::Type input_type = conversion->GetInputType();
switch (result_type) {
+ case Primitive::kPrimByte:
+ switch (input_type) {
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-byte' instruction.
+ if (in.IsRegister()) {
+ __ movsxb(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movsxb(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ __ movl(out.As<CpuRegister>(),
+ Immediate(static_cast<int8_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
+ case Primitive::kPrimInt:
+ switch (input_type) {
+ case Primitive::kPrimLong:
+ // Processing a Dex `long-to-int' instruction.
+ if (in.IsRegister()) {
+ __ movl(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movl(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.IsConstant());
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.As<CpuRegister>(), Immediate(static_cast<int32_t>(value)));
+ }
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Type conversion from " << input_type
+ << " to " << result_type << " not yet implemented";
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimLong:
switch (input_type) {
DCHECK(out.IsRegister());
@@ -1311,7 +1445,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimChar:
- // int-to-long conversion.
+ // Processing a Dex `int-to-long' instruction.
DCHECK(in.IsRegister());
__ movsxd(out.As<CpuRegister>(), in.As<CpuRegister>());
break;
@@ -1328,7 +1462,31 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
}
break;
- case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ switch (input_type) {
+ case Primitive::kPrimByte:
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ // Processing a Dex `int-to-char' instruction.
+ if (in.IsRegister()) {
+ __ movzxw(out.As<CpuRegister>(), in.As<CpuRegister>());
+ } else if (in.IsStackSlot()) {
+ __ movzxw(out.As<CpuRegister>(),
+ Address(CpuRegister(RSP), in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsIntConstant());
+ __ movl(out.As<CpuRegister>(),
+ Immediate(static_cast<uint16_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type conversion from " << input_type
+ << " to " << result_type;
+ }
+ break;
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
LOG(FATAL) << "Type conversion from " << input_type
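The movsxb and movzxw sequences above implement the Dex narrowing conversions: int-to-byte sign-extends the low 8 bits, int-to-char zero-extends the low 16 bits. A minimal C++ sketch of the same semantics (illustrative only, not part of this patch):

    #include <cstdint>

    // int-to-byte keeps the low 8 bits and sign-extends them;
    // int-to-char keeps the low 16 bits and zero-extends them.
    int32_t IntToByte(int32_t v) { return static_cast<int8_t>(v); }    // 0x1FF -> -1
    int32_t IntToChar(int32_t v) { return static_cast<uint16_t>(v); }  // -1 -> 0xFFFF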
@@ -1547,7 +1705,8 @@ void LocationsBuilderX86_64::VisitDiv(HDiv* div) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
switch (div->GetResultType()) {
- case Primitive::kPrimInt: {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RegisterLocation(RAX));
locations->SetInAt(1, Location::RequiresRegister());
locations->SetOut(Location::SameAsFirstInput());
@@ -1555,10 +1714,7 @@ void LocationsBuilderX86_64::VisitDiv(HDiv* div) {
locations->AddTemp(Location::RegisterLocation(RDX));
break;
}
- case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
- break;
- }
+
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
@@ -1578,38 +1734,42 @@ void InstructionCodeGeneratorX86_64::VisitDiv(HDiv* div) {
Location second = locations->InAt(1);
DCHECK(first.Equals(locations->Out()));
- switch (div->GetResultType()) {
- case Primitive::kPrimInt: {
+ Primitive::Type type = div->GetResultType();
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong: {
CpuRegister first_reg = first.As<CpuRegister>();
CpuRegister second_reg = second.As<CpuRegister>();
DCHECK_EQ(RAX, first_reg.AsRegister());
DCHECK_EQ(RDX, locations->GetTemp(0).As<CpuRegister>().AsRegister());
SlowPathCodeX86_64* slow_path =
- new (GetGraph()->GetArena()) DivMinusOneSlowPathX86_64(first_reg.AsRegister());
+ new (GetGraph()->GetArena()) DivMinusOneSlowPathX86_64(first_reg.AsRegister(), type);
codegen_->AddSlowPath(slow_path);
- // 0x80000000/-1 triggers an arithmetic exception!
- // Dividing by -1 is actually negation and -0x800000000 = 0x80000000 so
- // it's safe to just use negl instead of more complex comparisons.
+ // 0x80000000(00000000)/-1 triggers an arithmetic exception!
+      // Dividing by -1 is actually negation and -0x80000000(00000000) = 0x80000000(00000000)
+      // so it's safe to just negate (negl / negq) instead of more complex comparisons.
__ cmpl(second_reg, Immediate(-1));
__ j(kEqual, slow_path->GetEntryLabel());
- // edx:eax <- sign-extended of eax
- __ cdq();
- // eax = quotient, edx = remainder
- __ idivl(second_reg);
+ if (type == Primitive::kPrimInt) {
+ // edx:eax <- sign-extended of eax
+ __ cdq();
+ // eax = quotient, edx = remainder
+ __ idivl(second_reg);
+ } else {
+ // rdx:rax <- sign-extended of rax
+ __ cqo();
+ // rax = quotient, rdx = remainder
+ __ idivq(second_reg);
+ }
__ Bind(slow_path->GetExitLabel());
break;
}
- case Primitive::kPrimLong: {
- LOG(FATAL) << "Not implemented div type" << div->GetResultType();
- break;
- }
-
case Primitive::kPrimFloat: {
__ divss(first.As<XmmRegister>(), second.As<XmmRegister>());
break;
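The cmpl against -1 above exists because idivl/idivq raise a divide error not only for a zero divisor but also for INT_MIN / -1, whose quotient does not fit; the slow path simply negates instead. A minimal sketch of the semantics the emitted sequence implements, with SafeDiv64 as a hypothetical name (not part of this patch):

    #include <cstdint>

    // Mirrors the generated code: HDivZeroCheck has already rejected a zero
    // divisor; a divisor of -1 is handled by negating the dividend, which for
    // INT64_MIN wraps back to INT64_MIN, the result Java expects.
    int64_t SafeDiv64(int64_t dividend, int64_t divisor) {
      if (divisor == -1) {
        // Negate through unsigned arithmetic to avoid signed-overflow UB in C++.
        return static_cast<int64_t>(0u - static_cast<uint64_t>(dividend));
      }
      return dividend / divisor;
    }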
@@ -1642,18 +1802,40 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
LocationSummary* locations = instruction->GetLocations();
Location value = locations->InAt(0);
- if (value.IsRegister()) {
- __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
- } else if (value.IsStackSlot()) {
- __ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
- } else {
- DCHECK(value.IsConstant()) << value;
- if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ switch (instruction->GetType()) {
+ case Primitive::kPrimInt: {
+ if (value.IsRegister()) {
+ __ testl(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsStackSlot()) {
+ __ cmpl(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
}
- return;
+ case Primitive::kPrimLong: {
+ if (value.IsRegister()) {
+ __ testq(value.As<CpuRegister>(), value.As<CpuRegister>());
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else if (value.IsDoubleStackSlot()) {
+ __ cmpq(Address(CpuRegister(RSP), value.GetStackIndex()), Immediate(0));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ } else {
+ DCHECK(value.IsConstant()) << value;
+ if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
+ __ jmp(slow_path->GetEntryLabel());
+ }
+ }
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
}
- __ j(kEqual, slow_path->GetEntryLabel());
}
void LocationsBuilderX86_64::VisitNewInstance(HNewInstance* instruction) {
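The zero check above only tests the divisor (now for both int and long widths) and branches to a throwing slow path; the division itself never runs with a zero divisor. A minimal sketch of that contract, assuming a hypothetical ThrowArithmeticException hook:

    #include <cstdint>

    [[noreturn]] void ThrowArithmeticException();  // hypothetical runtime call (slow path)

    // Pass-through check mirroring HDivZeroCheck: the divisor flows on
    // unchanged unless it is zero, in which case the runtime throws.
    int64_t DivZeroCheck(int64_t divisor) {
      if (divisor == 0) {
        ThrowArithmeticException();
      }
      return divisor;
    }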
@@ -2743,7 +2925,7 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
-void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary::CallKind call_kind = instruction->IsClassFinal()
? LocationSummary::kNoCall
: LocationSummary::kCallOnSlowPath;
@@ -2753,7 +2935,7 @@ void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
locations->SetOut(Location::RequiresRegister());
}
-void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).As<CpuRegister>();
Location cls = locations->InAt(1);
@@ -2783,7 +2965,7 @@ void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
// If the classes are not equal, we go into a slow path.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
- instruction, Location::RegisterLocation(out.AsRegister()));
+ instruction, locations->InAt(1), locations->Out(), instruction->GetDexPc());
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -2797,5 +2979,135 @@ void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
__ Bind(&done);
}
+void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
+ instruction, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ Location cls = locations->InAt(1);
+ CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, locations->InAt(1), locations->GetTemp(0), instruction->GetDexPc());
+ codegen_->AddSlowPath(slow_path);
+
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, slow_path->GetExitLabel());
+ // Compare the class of `obj` with `cls`.
+ __ movl(temp, Address(obj, class_offset));
+ if (cls.IsRegister()) {
+ __ cmpl(temp, cls.As<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ // Classes must be equal for the checkcast to succeed.
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void LocationsBuilderX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+}
+
+void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* instruction) {
+ __ gs()->call(Address::Absolute(instruction->IsEnter()
+ ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pLockObject)
+ : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pUnlockObject),
+ true));
+ codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86_64::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+
+void LocationsBuilderX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ DCHECK(instruction->GetResultType() == Primitive::kPrimInt
+ || instruction->GetResultType() == Primitive::kPrimLong);
+ locations->SetInAt(0, Location::RequiresRegister());
+ if (instruction->GetType() == Primitive::kPrimInt) {
+ locations->SetInAt(1, Location::Any());
+ } else {
+    // Request a register to avoid loading a 64-bit constant.
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void InstructionCodeGeneratorX86_64::VisitAnd(HAnd* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitOr(HOr* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::VisitXor(HXor* instruction) {
+ HandleBitwiseOperation(instruction);
+}
+
+void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ DCHECK(first.Equals(locations->Out()));
+
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ if (second.IsRegister()) {
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), second.As<CpuRegister>());
+ }
+ } else if (second.IsConstant()) {
+ Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), imm);
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), imm);
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), imm);
+ }
+ } else {
+ Address address(CpuRegister(RSP), second.GetStackIndex());
+ if (instruction->IsAnd()) {
+ __ andl(first.As<CpuRegister>(), address);
+ } else if (instruction->IsOr()) {
+ __ orl(first.As<CpuRegister>(), address);
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorl(first.As<CpuRegister>(), address);
+ }
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ if (instruction->IsAnd()) {
+ __ andq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else if (instruction->IsOr()) {
+ __ orq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ } else {
+ DCHECK(instruction->IsXor());
+ __ xorq(first.As<CpuRegister>(), second.As<CpuRegister>());
+ }
+ }
+}
+
} // namespace x86_64
} // namespace art
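The VisitInstanceOf and VisitCheckCast changes earlier in this file diff compare the object's class pointer against the resolved class and fall back to a slow path on mismatch. A rough C++ sketch of the fast path only, with generic types standing in for the runtime's mirror classes (an assumption, not the actual ART API):

    struct Class;
    struct Object { Class* klass; };

    // Fast path for a final class: null passes checkcast but fails instanceof;
    // otherwise the object's class must be exactly the target class.
    bool InstanceOfFastPath(const Object* obj, const Class* cls) {
      return obj != nullptr && obj->klass == cls;
    }
    bool CheckCastFastPath(const Object* obj, const Class* cls) {
      return obj == nullptr || obj->klass == cls;  // otherwise the slow path throws
    }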
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 9565b6f876..4c6e4750d7 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -104,9 +104,10 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
#undef DECLARE_VISIT_INSTRUCTION
+ private:
void HandleInvoke(HInvoke* invoke);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
- private:
CodeGeneratorX86_64* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
@@ -132,6 +133,7 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCodeX86_64* slow_path, CpuRegister class_reg);
+ void HandleBitwiseOperation(HBinaryOperation* operation);
X86_64Assembler* const assembler_;
CodeGeneratorX86_64* const codegen_;
@@ -171,7 +173,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return &assembler_;
}
- ParallelMoveResolverX86_64* GetMoveResolver() {
+ ParallelMoveResolverX86_64* GetMoveResolver() OVERRIDE {
return &move_resolver_;
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index ecee44392e..fee3ea6f8c 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -16,6 +16,7 @@
#include <functional>
+#include "arch/instruction_set.h"
#include "base/macros.h"
#include "builder.h"
#include "code_generator_arm.h"
@@ -25,7 +26,6 @@
#include "common_compiler_test.h"
#include "dex_file.h"
#include "dex_instruction.h"
-#include "instruction_set.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "prepare_for_register_allocation.h"
@@ -39,7 +39,7 @@ namespace art {
class InternalCodeAllocator : public CodeAllocator {
public:
- InternalCodeAllocator() { }
+ InternalCodeAllocator() : size_(0) { }
virtual uint8_t* Allocate(size_t size) {
size_ = size;
@@ -362,6 +362,27 @@ NOT_LONG_TEST(ReturnNotLongINT64_MAX,
#undef NOT_LONG_TEST
+#if defined(__aarch64__)
+TEST(CodegenTest, DISABLED_IntToLongOfLongToInt) {
+#else
+TEST(CodegenTest, IntToLongOfLongToInt) {
+#endif
+ const int64_t input = INT64_C(4294967296); // 2^32
+ const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW.
+ const uint16_t word1 = High16Bits(Low32Bits(input));
+ const uint16_t word2 = Low16Bits(High32Bits(input));
+ const uint16_t word3 = High16Bits(High32Bits(input)); // MSW.
+ const uint16_t data[] = FIVE_REGISTERS_CODE_ITEM(
+ Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3,
+ Instruction::CONST_WIDE | 2 << 8, 1, 0, 0, 0,
+ Instruction::ADD_LONG | 0, 0 << 8 | 2, // v0 <- 2^32 + 1
+ Instruction::LONG_TO_INT | 4 << 8 | 0 << 12,
+ Instruction::INT_TO_LONG | 2 << 8 | 4 << 12,
+ Instruction::RETURN_WIDE | 2 << 8);
+
+ TestCodeLong(data, true, 1);
+}
+
TEST(CodegenTest, ReturnAdd1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
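The IntToLongOfLongToInt test added above builds 2^32 + 1 as a long, truncates it with long-to-int, and widens it back with int-to-long, so the expected return value is 1. The same arithmetic in plain C++ (a worked example, not part of the test):

    #include <cstdint>
    #include <cassert>

    int main() {
      int64_t wide = INT64_C(4294967296) + 1;             // 2^32 + 1
      int32_t narrowed = static_cast<int32_t>(wide);      // long-to-int -> 1
      int64_t widened  = static_cast<int64_t>(narrowed);  // int-to-long -> 1
      assert(widened == 1);
      return 0;
    }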
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 2dab605465..5af3cdd2d6 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -90,7 +90,7 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
maximum_number_of_out_vregs_(0),
number_of_vregs_(0),
number_of_in_vregs_(0),
- number_of_temporaries_(0),
+ temporaries_vreg_slots_(0),
current_instruction_id_(0) {}
ArenaAllocator* GetArena() const { return arena_; }
@@ -129,12 +129,12 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
maximum_number_of_out_vregs_ = std::max(new_value, maximum_number_of_out_vregs_);
}
- void UpdateNumberOfTemporaries(size_t count) {
- number_of_temporaries_ = std::max(count, number_of_temporaries_);
+ void UpdateTemporariesVRegSlots(size_t slots) {
+ temporaries_vreg_slots_ = std::max(slots, temporaries_vreg_slots_);
}
- size_t GetNumberOfTemporaries() const {
- return number_of_temporaries_;
+ size_t GetTemporariesVRegSlots() const {
+ return temporaries_vreg_slots_;
}
void SetNumberOfVRegs(uint16_t number_of_vregs) {
@@ -192,8 +192,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
// The number of virtual registers used by parameters of this method.
uint16_t number_of_in_vregs_;
- // The number of temporaries that will be needed for the baseline compiler.
- size_t number_of_temporaries_;
+  // Number of vreg-sized slots that the temporaries use (used by the baseline compiler).
+ size_t temporaries_vreg_slots_;
// The current id to assign to a newly added instruction. See HInstruction.id_.
int current_instruction_id_;
@@ -475,10 +475,12 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
M(Add, BinaryOperation) \
+ M(And, BinaryOperation) \
M(ArrayGet, Instruction) \
M(ArrayLength, Instruction) \
M(ArraySet, Instruction) \
M(BoundsCheck, Instruction) \
+ M(CheckCast, Instruction) \
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
@@ -494,6 +496,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(If, Instruction) \
M(InstanceFieldGet, Instruction) \
M(InstanceFieldSet, Instruction) \
+ M(InstanceOf, Instruction) \
M(IntConstant, Constant) \
M(InvokeInterface, Invoke) \
M(InvokeStatic, Invoke) \
@@ -506,6 +509,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(LoadString, Instruction) \
M(Local, Instruction) \
M(LongConstant, Constant) \
+ M(MonitorOperation, Instruction) \
M(Mul, BinaryOperation) \
M(Neg, UnaryOperation) \
M(NewArray, Instruction) \
@@ -513,6 +517,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(Not, UnaryOperation) \
M(NotEqual, Condition) \
M(NullCheck, Instruction) \
+ M(Or, BinaryOperation) \
M(ParallelMove, Instruction) \
M(ParameterValue, Instruction) \
M(Phi, Instruction) \
@@ -525,8 +530,8 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(SuspendCheck, Instruction) \
M(Temporary, Instruction) \
M(Throw, Instruction) \
- M(TypeCheck, Instruction) \
M(TypeConversion, Instruction) \
+ M(Xor, BinaryOperation) \
#define FOR_EACH_INSTRUCTION(M) \
FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -1745,8 +1750,8 @@ class HMul : public HBinaryOperation {
class HDiv : public HBinaryOperation {
public:
- HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right)
- : HBinaryOperation(result_type, left, right) {}
+ HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
+ : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
virtual int32_t Evaluate(int32_t x, int32_t y) const {
// Our graph structure ensures we never have 0 for `y` during constant folding.
@@ -1756,9 +1761,13 @@ class HDiv : public HBinaryOperation {
}
virtual int64_t Evaluate(int64_t x, int64_t y) const { return x / y; }
+ uint32_t GetDexPc() const { return dex_pc_; }
+
DECLARE_INSTRUCTION(Div);
private:
+ const uint32_t dex_pc_;
+
DISALLOW_COPY_AND_ASSIGN(HDiv);
};
@@ -1789,6 +1798,54 @@ class HDivZeroCheck : public HExpression<1> {
DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
};
+class HAnd : public HBinaryOperation {
+ public:
+ HAnd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x & y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x & y; }
+
+ DECLARE_INSTRUCTION(And);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HAnd);
+};
+
+class HOr : public HBinaryOperation {
+ public:
+ HOr(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x | y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x | y; }
+
+ DECLARE_INSTRUCTION(Or);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HOr);
+};
+
+class HXor : public HBinaryOperation {
+ public:
+ HXor(Primitive::Type result_type, HInstruction* left, HInstruction* right)
+ : HBinaryOperation(result_type, left, right) {}
+
+ bool IsCommutative() OVERRIDE { return true; }
+
+ int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x ^ y; }
+ int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x ^ y; }
+
+ DECLARE_INSTRUCTION(Xor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HXor);
+};
+
// The value of a parameter in this method. Its location depends on
// the calling convention.
class HParameterValue : public HExpression<0> {
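The Evaluate overloads on the new HAnd/HOr/HXor nodes let a constant-folding pass compute results directly when both inputs are constants. Expected values for one sample operand pair, checked here with ordinary C++ operators (illustrative only):

    #include <cstdint>
    #include <cassert>

    int main() {
      int32_t x = 0b1100, y = 0b1010;
      assert((x & y) == 0b1000);  // what HAnd::Evaluate(x, y) returns
      assert((x | y) == 0b1110);  // what HOr::Evaluate(x, y) returns
      assert((x ^ y) == 0b0110);  // what HXor::Evaluate(x, y) returns
      return 0;
    }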
@@ -2105,8 +2162,8 @@ class HBoundsCheck : public HExpression<2> {
* Some DEX instructions are folded into multiple HInstructions that need
* to stay live until the last HInstruction. This class
* is used as a marker for the baseline compiler to ensure its preceding
- * HInstruction stays live. `index` is the temporary number that is used
- * for knowing the stack offset where to store the instruction.
+ * HInstruction stays live. `index` represents the stack location index of the
+ * instruction (the actual offset is computed as index * vreg_size).
*/
class HTemporary : public HTemplateInstruction<0> {
public:
@@ -2114,7 +2171,11 @@ class HTemporary : public HTemplateInstruction<0> {
size_t GetIndex() const { return index_; }
- Primitive::Type GetType() const OVERRIDE { return GetPrevious()->GetType(); }
+ Primitive::Type GetType() const OVERRIDE {
+ // The previous instruction is the one that will be stored in the temporary location.
+ DCHECK(GetPrevious() != nullptr);
+ return GetPrevious()->GetType();
+ }
DECLARE_INSTRUCTION(Temporary);
@@ -2351,12 +2412,12 @@ class HThrow : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HThrow);
};
-class HTypeCheck : public HExpression<2> {
+class HInstanceOf : public HExpression<2> {
public:
- explicit HTypeCheck(HInstruction* object,
- HLoadClass* constant,
- bool class_is_final,
- uint32_t dex_pc)
+ HInstanceOf(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
: HExpression(Primitive::kPrimBoolean, SideEffects::None()),
class_is_final_(class_is_final),
dex_pc_(dex_pc) {
@@ -2366,13 +2427,11 @@ class HTypeCheck : public HExpression<2> {
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
bool NeedsEnvironment() const OVERRIDE {
- // TODO: Can we debug when doing a runtime instanceof check?
return false;
}
@@ -2380,13 +2439,82 @@ class HTypeCheck : public HExpression<2> {
bool IsClassFinal() const { return class_is_final_; }
- DECLARE_INSTRUCTION(TypeCheck);
+ DECLARE_INSTRUCTION(InstanceOf);
+
+ private:
+ const bool class_is_final_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
+};
+
+class HCheckCast : public HTemplateInstruction<2> {
+ public:
+ HCheckCast(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()),
+ class_is_final_(class_is_final),
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ SetRawInputAt(1, constant);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE {
+  // Instruction may throw a ClassCastException.
+ return true;
+ }
+
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsClassFinal() const { return class_is_final_; }
+
+ DECLARE_INSTRUCTION(CheckCast);
private:
const bool class_is_final_;
const uint32_t dex_pc_;
- DISALLOW_COPY_AND_ASSIGN(HTypeCheck);
+ DISALLOW_COPY_AND_ASSIGN(HCheckCast);
+};
+
+class HMonitorOperation : public HTemplateInstruction<1> {
+ public:
+ enum OperationKind {
+ kEnter,
+ kExit,
+ };
+
+ HMonitorOperation(HInstruction* object, OperationKind kind, uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None()), kind_(kind), dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ }
+
+ // Instruction may throw a Java exception, so we need an environment.
+ bool NeedsEnvironment() const OVERRIDE { return true; }
+ bool CanThrow() const OVERRIDE { return true; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsEnter() const { return kind_ == kEnter; }
+
+ DECLARE_INSTRUCTION(MonitorOperation);
+
+ protected:
+ const OperationKind kind_;
+ const uint32_t dex_pc_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
};
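HMonitorOperation only records whether it is an enter or an exit plus the dex pc; the code generator picks the runtime entrypoint from IsEnter(), as in the x86-64 VisitMonitorOperation earlier in this diff. A self-contained sketch of that selection (the enum and function are illustrative stand-ins, not ART code):

    // Mirrors the entrypoint choice: kEnter -> pLockObject, kExit -> pUnlockObject.
    enum class MonitorKind { kEnter, kExit };

    const char* EntrypointFor(MonitorKind kind) {
      return kind == MonitorKind::kEnter ? "pLockObject" : "pUnlockObject";
    }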
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 3d81362851..ba4be34ca3 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -713,7 +713,7 @@ static HGraph* BuildDiv(ArenaAllocator* allocator,
graph->AddBlock(block);
entry->AddSuccessor(block);
- *div = new (allocator) HDiv(Primitive::kPrimInt, first, second);
+ *div = new (allocator) HDiv(Primitive::kPrimInt, first, second, 0); // don't care about dex_pc.
block->AddInstruction(*div);
block->AddInstruction(new (allocator) HExit());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index e83c528fab..fec40f93c7 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -253,4 +253,9 @@ void SsaBuilder::VisitInstruction(HInstruction* instruction) {
instruction->SetEnvironment(environment);
}
+void SsaBuilder::VisitTemporary(HTemporary* temp) {
+ // Temporaries are only used by the baseline register allocator.
+ temp->GetBlock()->RemoveInstruction(temp);
+}
+
} // namespace art
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 24f5ac55f7..2207cd6bfa 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -51,6 +51,7 @@ class SsaBuilder : public HGraphVisitor {
void VisitLoadLocal(HLoadLocal* load);
void VisitStoreLocal(HStoreLocal* store);
void VisitInstruction(HInstruction* instruction);
+ void VisitTemporary(HTemporary* instruction);
static HInstruction* GetFloatOrDoubleEquivalent(HInstruction* user,
HInstruction* instruction,