path: root/compiler/optimizing
Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/block_builder.cc | 30
 compiler/optimizing/bounds_check_elimination.cc | 8
 compiler/optimizing/bounds_check_elimination_test.cc | 15
 compiler/optimizing/builder.cc | 12
 compiler/optimizing/builder.h | 3
 compiler/optimizing/code_generator.cc | 68
 compiler/optimizing/code_generator.h | 11
 compiler/optimizing/code_generator_arm64.cc | 164
 compiler/optimizing/code_generator_arm64.h | 6
 compiler/optimizing/code_generator_arm_vixl.cc | 247
 compiler/optimizing/code_generator_arm_vixl.h | 2
 compiler/optimizing/code_generator_mips.cc | 130
 compiler/optimizing/code_generator_mips.h | 2
 compiler/optimizing/code_generator_mips64.cc | 104
 compiler/optimizing/code_generator_mips64.h | 2
 compiler/optimizing/code_generator_vector_arm64.cc | 72
 compiler/optimizing/code_generator_vector_arm_vixl.cc | 10
 compiler/optimizing/code_generator_vector_mips.cc | 22
 compiler/optimizing/code_generator_vector_mips64.cc | 20
 compiler/optimizing/code_generator_vector_x86.cc | 41
 compiler/optimizing/code_generator_vector_x86_64.cc | 41
 compiler/optimizing/code_generator_x86.cc | 140
 compiler/optimizing/code_generator_x86.h | 5
 compiler/optimizing/code_generator_x86_64.cc | 206
 compiler/optimizing/code_generator_x86_64.h | 5
 compiler/optimizing/code_sinking.cc | 12
 compiler/optimizing/codegen_test.cc | 27
 compiler/optimizing/constant_folding_test.cc | 2
 compiler/optimizing/constructor_fence_redundancy_elimination.cc | 2
 compiler/optimizing/data_type.h | 15
 compiler/optimizing/dead_code_elimination_test.cc | 2
 compiler/optimizing/graph_checker.cc | 6
 compiler/optimizing/graph_visualizer.cc | 21
 compiler/optimizing/gvn.cc | 6
 compiler/optimizing/induction_var_analysis.cc | 4
 compiler/optimizing/induction_var_range.cc | 34
 compiler/optimizing/induction_var_range_test.cc | 14
 compiler/optimizing/inliner.cc | 148
 compiler/optimizing/inliner.h | 5
 compiler/optimizing/instruction_builder.cc | 310
 compiler/optimizing/instruction_builder.h | 30
 compiler/optimizing/instruction_simplifier.cc | 35
 compiler/optimizing/instruction_simplifier_arm.cc | 9
 compiler/optimizing/instruction_simplifier_arm64.cc | 4
 compiler/optimizing/instruction_simplifier_shared.cc | 4
 compiler/optimizing/instruction_simplifier_x86.cc | 88
 compiler/optimizing/instruction_simplifier_x86.h | 44
 compiler/optimizing/instruction_simplifier_x86_64.cc | 82
 compiler/optimizing/instruction_simplifier_x86_64.h | 48
 compiler/optimizing/instruction_simplifier_x86_shared.cc | 137
 compiler/optimizing/instruction_simplifier_x86_shared.h | 29
 compiler/optimizing/intrinsic_objects.cc | 3
 compiler/optimizing/intrinsics.cc | 179
 compiler/optimizing/intrinsics.h | 33
 compiler/optimizing/intrinsics_arm64.cc | 353
 compiler/optimizing/intrinsics_arm_vixl.cc | 196
 compiler/optimizing/intrinsics_mips.cc | 94
 compiler/optimizing/intrinsics_mips64.cc | 93
 compiler/optimizing/intrinsics_x86.cc | 242
 compiler/optimizing/intrinsics_x86_64.cc | 208
 compiler/optimizing/load_store_elimination.cc | 2
 compiler/optimizing/loop_optimization.cc | 134
 compiler/optimizing/loop_optimization.h | 6
 compiler/optimizing/loop_optimization_test.cc | 2
 compiler/optimizing/nodes.cc | 98
 compiler/optimizing/nodes.h | 103
 compiler/optimizing/nodes_vector.h | 82
 compiler/optimizing/nodes_vector_test.cc | 6
 compiler/optimizing/nodes_x86.h | 86
 compiler/optimizing/optimization.cc | 38
 compiler/optimizing/optimization.h | 7
 compiler/optimizing/optimizing_cfi_test.cc | 2
 compiler/optimizing/optimizing_compiler.cc | 295
 compiler/optimizing/optimizing_compiler.h | 7
 compiler/optimizing/optimizing_compiler_stats.h | 4
 compiler/optimizing/optimizing_unit_test.h | 12
 compiler/optimizing/parallel_move_test.cc | 4
 compiler/optimizing/pc_relative_fixups_x86.cc | 3
 compiler/optimizing/prepare_for_register_allocation.cc | 13
 compiler/optimizing/prepare_for_register_allocation.h | 1
 compiler/optimizing/reference_type_propagation.cc | 52
 compiler/optimizing/register_allocation_resolver.cc | 8
 compiler/optimizing/register_allocator_linear_scan.cc | 4
 compiler/optimizing/register_allocator_test.cc | 20
 compiler/optimizing/scheduler.cc | 2
 compiler/optimizing/scheduler_arm.cc | 6
 compiler/optimizing/scheduler_test.cc | 2
 compiler/optimizing/sharpening.cc | 37
 compiler/optimizing/sharpening.h | 26
 compiler/optimizing/side_effects_test.cc | 26
 compiler/optimizing/ssa_builder.cc | 116
 compiler/optimizing/ssa_builder.h | 9
 compiler/optimizing/ssa_liveness_analysis.cc | 2
 compiler/optimizing/ssa_liveness_analysis.h | 10
 compiler/optimizing/ssa_liveness_analysis_test.cc | 32
 compiler/optimizing/ssa_phi_elimination.cc | 2
 compiler/optimizing/superblock_cloner.h | 4
 97 files changed, 3216 insertions(+), 1932 deletions(-)
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index d9df23fd47..a5f78cafe0 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -68,7 +68,7 @@ bool HBasicBlockBuilder::CreateBranchTargets() {
// places where the program might fall through into/out of a block and
// where TryBoundary instructions will be inserted later. Other edges which
// enter/exit the try blocks are a result of branches/switches.
- for (const DexFile::TryItem& try_item : code_item_accessor_.TryItems()) {
+ for (const dex::TryItem& try_item : code_item_accessor_.TryItems()) {
uint32_t dex_pc_start = try_item.start_addr_;
uint32_t dex_pc_end = dex_pc_start + try_item.insn_count_;
MaybeCreateBlockAt(dex_pc_start);
@@ -222,9 +222,9 @@ void HBasicBlockBuilder::ConnectBasicBlocks() {
}
// Returns the TryItem stored for `block` or nullptr if there is no info for it.
-static const DexFile::TryItem* GetTryItem(
+static const dex::TryItem* GetTryItem(
HBasicBlock* block,
- const ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
+ const ScopedArenaSafeMap<uint32_t, const dex::TryItem*>& try_block_info) {
auto iterator = try_block_info.find(block->GetBlockId());
return (iterator == try_block_info.end()) ? nullptr : iterator->second;
}
@@ -235,7 +235,7 @@ static const DexFile::TryItem* GetTryItem(
// for a handler.
static void LinkToCatchBlocks(HTryBoundary* try_boundary,
const CodeItemDataAccessor& accessor,
- const DexFile::TryItem* try_item,
+ const dex::TryItem* try_item,
const ScopedArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
for (CatchHandlerIterator it(accessor.GetCatchHandlerData(try_item->handler_off_));
it.HasNext();
@@ -279,7 +279,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
// Keep a map of all try blocks and their respective TryItems. We do not use
// the block's pointer but rather its id to ensure deterministic iteration.
- ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
+ ScopedArenaSafeMap<uint32_t, const dex::TryItem*> try_block_info(
std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
// Obtain TryItem information for blocks with throwing instructions, and split
@@ -295,7 +295,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
// loop for synchronized blocks.
if (ContainsElement(throwing_blocks_, block)) {
// Try to find a TryItem covering the block.
- const DexFile::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
+ const dex::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
if (try_item != nullptr) {
// Block throwing and in a TryItem. Store the try block information.
try_block_info.Put(block->GetBlockId(), try_item);
@@ -315,8 +315,16 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t address = iterator.GetHandlerAddress();
- if (catch_blocks.find(address) != catch_blocks.end()) {
+ auto existing = catch_blocks.find(address);
+ if (existing != catch_blocks.end()) {
// Catch block already processed.
+ TryCatchInformation* info = existing->second->GetTryCatchInformation();
+ if (iterator.GetHandlerTypeIndex() != info->GetCatchTypeIndex()) {
+ // The handler is for multiple types. We could record all the types, but
+ // doing class resolution here isn't ideal, and it's unclear whether wasting
+ // the space in TryCatchInformation is worth it.
+ info->SetInvalidTypeIndex();
+ }
continue;
}
@@ -337,7 +345,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
catch_blocks.Put(address, catch_block);
catch_block->SetTryCatchInformation(
- new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
+ new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
}
handlers_ptr = iterator.EndDataPointer();
}
@@ -348,7 +356,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
// that all predecessors are relinked to. This preserves loop headers (b/23895756).
for (const auto& entry : try_block_info) {
uint32_t block_id = entry.first;
- const DexFile::TryItem* try_item = entry.second;
+ const dex::TryItem* try_item = entry.second;
HBasicBlock* try_block = graph_->GetBlocks()[block_id];
for (HBasicBlock* predecessor : try_block->GetPredecessors()) {
if (GetTryItem(predecessor, try_block_info) != try_item) {
@@ -367,7 +375,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
// the successor is not in the same TryItem.
for (const auto& entry : try_block_info) {
uint32_t block_id = entry.first;
- const DexFile::TryItem* try_item = entry.second;
+ const dex::TryItem* try_item = entry.second;
HBasicBlock* try_block = graph_->GetBlocks()[block_id];
// NOTE: Do not use iterators because SplitEdge would invalidate them.
for (size_t i = 0, e = try_block->GetSuccessors().size(); i < e; ++i) {
@@ -415,7 +423,7 @@ void HBasicBlockBuilder::BuildIntrinsic() {
// Create blocks.
HBasicBlock* entry_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
HBasicBlock* exit_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
- HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc */ kNoDexPc, /* store_dex_pc */ 0u);
+ HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc= */ kNoDexPc, /* store_dex_pc= */ 0u);
// Add blocks to the graph.
graph_->AddBlock(entry_block);
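
Note on the catch-block change above: one dex handler address can be reached from several handler entries with different exception types (e.g. a Java `catch (A | B e)` compiles to a single handler listed under two type indices). Rather than record every type, the new code invalidates the stored type index the moment a second, different type maps to the same block. A minimal sketch of that bookkeeping, with simplified illustrative types (not the real ART classes):

#include <cstdint>
#include <map>

constexpr uint32_t kInvalidTypeIndex = 0xFFFFFFFFu;

struct TryCatchInfo {
  uint32_t catch_type_index;  // dex type index of the caught class
  void SetInvalidTypeIndex() { catch_type_index = kInvalidTypeIndex; }
};

void RecordHandler(std::map<uint32_t, TryCatchInfo>& catch_blocks,
                   uint32_t handler_address,
                   uint32_t handler_type_index) {
  auto it = catch_blocks.find(handler_address);
  if (it != catch_blocks.end()) {
    // Same handler reached for a second exception type: a single stored
    // type index can no longer describe it, so mark it invalid.
    if (it->second.catch_type_index != handler_type_index) {
      it->second.SetInvalidTypeIndex();
    }
    return;
  }
  catch_blocks.emplace(handler_address, TryCatchInfo{handler_type_index});
}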
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 1c3660c0a7..e35d50220e 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -845,8 +845,10 @@ class BCEVisitor : public HGraphVisitor {
// make one more attempt to get a constant in the array range.
ValueRange* existing_range = LookupValueRange(array_length, block);
if (existing_range != nullptr &&
- existing_range->IsConstantValueRange()) {
- ValueRange constant_array_range(&allocator_, lower, existing_range->GetLower());
+ existing_range->IsConstantValueRange() &&
+ existing_range->GetLower().GetConstant() > 0) {
+ ValueBound constant_upper(nullptr, existing_range->GetLower().GetConstant() - 1);
+ ValueRange constant_array_range(&allocator_, lower, constant_upper);
if (index_range->FitsIn(&constant_array_range)) {
ReplaceInstruction(bounds_check, index);
return;
@@ -1634,7 +1636,7 @@ class BCEVisitor : public HGraphVisitor {
HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant());
- InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);
+ InsertDeoptInLoop(loop, block, cond, /* is_null_check= */ true);
ReplaceInstruction(check, array);
return true;
}
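
The bounds_check_elimination.cc hunk above fixes an off-by-one: when the array length is known to be a constant L, the valid indices are [0, L-1], so the comparison range must use L-1 (and only when L > 0, since an empty array admits no index at all). A worked sketch of the corrected check, using plain ints instead of ART's ValueBound/ValueRange:

struct Range { int lower; int upper; };  // inclusive bounds

// True when every index in `index_range` is provably in-bounds for an
// array whose length is the compile-time constant `length`.
bool IndexAlwaysInBounds(const Range& index_range, int length) {
  if (length <= 0) return false;   // empty array: nothing is in bounds
  Range valid{0, length - 1};      // the fix: upper bound is length-1, not length
  return index_range.lower >= valid.lower && index_range.upper <= valid.upper;
}
// Example: length == 10 admits indices [0, 9]; the old code effectively
// compared against [0, 10] and could wrongly drop the check for index 10.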
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 7c29df877a..5927d681b2 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,7 +43,7 @@ class BoundsCheckEliminationTest : public OptimizingUnitTest {
void RunBCE() {
graph_->BuildDominatorTree();
- InstructionSimplifier(graph_, /* codegen */ nullptr).Run();
+ InstructionSimplifier(graph_, /* codegen= */ nullptr).Run();
SideEffectsAnalysis side_effects(graph_);
side_effects.Run();
@@ -598,9 +598,10 @@ static HInstruction* BuildSSAGraph3(HGraph* graph,
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
HInstruction* new_array = new (allocator) HNewArray(
- constant_10,
- constant_10,
- 0);
+ /* cls= */ constant_10,
+ /* length= */ constant_10,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
block->AddInstruction(new_array);
block->AddInstruction(new (allocator) HGoto());
@@ -977,7 +978,11 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
graph_->AddBlock(block);
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (GetAllocator()) HNewArray(constant_10, constant_10, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(
+ /* cls= */ constant_10,
+ /* length= */ constant_10,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
block->AddInstruction(new_array);
block->AddInstruction(new (GetAllocator()) HGoto());
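
Most of the mechanical churn in these hunks (and throughout the rest of the diff) is the switch from `/* name */ value` to `/* name= */ value` argument comments. The trailing `=` makes the comment machine-checkable: clang-tidy's bugprone-argument-comment check compares `/* param= */` comments against the callee's declared parameter names, so a comment that drifts out of sync becomes a warning instead of silent misinformation. A small illustration:

void Resize(int width, int height);

Resize(/* width= */ 640, /* height= */ 480);   // verified against the declaration
Resize(/* height= */ 640, /* width= */ 480);   // flagged: comments name the wrong parameters
Resize(/* w */ 640, /* h */ 480);              // old style: never checked, can rot silently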
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a1a5692ef6..64aa1b9358 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -21,6 +21,7 @@
#include "base/bit_vector-inl.h"
#include "base/logging.h"
#include "block_builder.h"
+#include "code_generator.h"
#include "data_type-inl.h"
#include "dex/verified_method.h"
#include "driver/compiler_options.h"
@@ -40,7 +41,6 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
const CodeItemDebugInfoAccessor& accessor,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
ArrayRef<const uint8_t> interpreter_metadata,
@@ -50,7 +50,6 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
code_item_accessor_(accessor),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
- compiler_driver_(driver),
code_generator_(code_generator),
compilation_stats_(compiler_stats),
interpreter_metadata_(interpreter_metadata),
@@ -67,19 +66,18 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
code_item_accessor_(accessor),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(nullptr),
- compiler_driver_(nullptr),
code_generator_(nullptr),
compilation_stats_(nullptr),
handles_(handles),
return_type_(return_type) {}
bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
- if (compiler_driver_ == nullptr) {
- // Note that the compiler driver is null when unit testing.
+ if (code_generator_ == nullptr) {
+ // Note that the codegen is null when unit testing.
return false;
}
- const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
+ const CompilerOptions& compiler_options = code_generator_->GetCompilerOptions();
CompilerFilter::Filter compiler_filter = compiler_options.GetCompilerFilter();
if (compiler_filter == CompilerFilter::kEverything) {
return false;
@@ -131,7 +129,6 @@ GraphAnalysisResult HGraphBuilder::BuildGraph() {
return_type_,
dex_compilation_unit_,
outer_compilation_unit_,
- compiler_driver_,
code_generator_,
interpreter_metadata_,
compilation_stats_,
@@ -203,7 +200,6 @@ void HGraphBuilder::BuildIntrinsicGraph(ArtMethod* method) {
return_type_,
dex_compilation_unit_,
outer_compilation_unit_,
- compiler_driver_,
code_generator_,
interpreter_metadata_,
compilation_stats_,
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 5a1914ce08..6152740324 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -22,7 +22,6 @@
#include "dex/code_item_accessors.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file.h"
-#include "driver/compiler_driver.h"
#include "nodes.h"
namespace art {
@@ -38,7 +37,6 @@ class HGraphBuilder : public ValueObject {
const CodeItemDebugInfoAccessor& accessor,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
ArrayRef<const uint8_t> interpreter_metadata,
@@ -70,7 +68,6 @@ class HGraphBuilder : public ValueObject {
// The compilation unit of the enclosing method being compiled.
const DexCompilationUnit* const outer_compilation_unit_;
- CompilerDriver* const compiler_driver_;
CodeGenerator* const code_generator_;
OptimizingCompilerStats* const compilation_stats_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e84896b113..9e2f5cd508 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -197,7 +197,7 @@ class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllo
return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
}
- void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
+ void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -230,29 +230,31 @@ class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllo
};
void CodeGenerator::CodeGenerationData::EmitJitRoots(
- Handle<mirror::ObjectArray<mirror::Object>> roots) {
- DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
+ /*out*/std::vector<Handle<mirror::Object>>* roots) {
+ DCHECK(roots->empty());
+ roots->reserve(GetNumberOfJitRoots());
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
size_t index = 0;
for (auto& entry : jit_string_roots_) {
// Update the `roots` with the string, and replace the address temporarily
// stored to the index in the table.
uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
+ roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+ DCHECK(roots->back() != nullptr);
+ DCHECK(roots->back()->IsString());
entry.second = index;
// Ensure the string is strongly interned. This is a requirement on how the JIT
// handles strings. b/32995596
- class_linker->GetInternTable()->InternStrong(
- reinterpret_cast<mirror::String*>(roots->Get(index)));
+ class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
++index;
}
for (auto& entry : jit_class_roots_) {
// Update the `roots` with the class, and replace the address temporarily
// stored to the index in the table.
uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
+ roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+ DCHECK(roots->back() != nullptr);
+ DCHECK(roots->back()->IsClass());
entry.second = index;
++index;
}
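
The EmitJitRoots rework above replaces the pre-sized mirror::ObjectArray out-parameter with an empty std::vector of Handles that the method fills itself, so the caller no longer has to allocate a managed array of exactly GetNumberOfJitRoots() elements up front. A sketch of the assumed caller shape (names hypothetical, mutator lock held as the REQUIRES_SHARED annotation demands):

std::vector<Handle<mirror::Object>> roots;             // must start empty; DCHECK'd above
codegen->EmitJitRoots(code_ptr, roots_data, /*out*/ &roots);
// `roots` now holds one Handle per JIT string/class root, in table order;
// each entry.second in the patch tables was rewritten to that root's index.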
@@ -412,7 +414,7 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
// This ensures that we have correct native line mapping for all native instructions.
// It is necessary to make stepping over a statement work. Otherwise, any initial
// instructions (e.g. moves) would be assumed to be the start of next statement.
- MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
+ MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->HasEnvironment()) {
@@ -985,7 +987,7 @@ static void CheckCovers(uint32_t dex_pc,
// dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
const CodeInfo& code_info,
- const DexFile::CodeItem& code_item) {
+ const dex::CodeItem& code_item) {
if (graph.HasTryCatch()) {
// One can write loops through try/catch, which we do not support for OSR anyway.
return;
@@ -1027,7 +1029,7 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
}
}
-ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const DexFile::CodeItem* code_item) {
+ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
if (kIsDebugBuild && code_item != nullptr) {
CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
@@ -1083,7 +1085,7 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
// call). Therefore register_mask contains both callee-save and caller-save
// registers that hold objects. We must remove the spilled caller-save from the
// mask, since they will be overwritten by the callee.
- uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true);
+ uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
register_mask &= ~spills;
} else {
// The register mask must be a subset of callee-save registers.
@@ -1124,6 +1126,7 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
if (osr) {
DCHECK_EQ(info->GetSuspendCheck(), instruction);
DCHECK(info->IsIrreducible());
+ DCHECK(environment != nullptr);
if (kIsDebugBuild) {
for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
HInstruction* in_environment = environment->GetInstructionAt(i);
@@ -1161,7 +1164,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
// Ensure that we do not collide with the stack map of the previous instruction.
GenerateNop();
}
- RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info */ true);
+ RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
}
}
@@ -1179,8 +1182,8 @@ void CodeGenerator::RecordCatchBlockInfo() {
stack_map_stream->BeginStackMapEntry(dex_pc,
native_pc,
- /* register_mask */ 0,
- /* stack_mask */ nullptr,
+ /* register_mask= */ 0,
+ /* sp_mask= */ nullptr,
StackMap::Kind::Catch);
HInstruction* current_phi = block->GetFirstPhi();
@@ -1552,7 +1555,7 @@ void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* in
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
@@ -1564,7 +1567,7 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
stack_offset += codegen->SaveCoreRegister(stack_offset, i);
}
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -1576,14 +1579,14 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
}
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -1645,28 +1648,21 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
}
void CodeGenerator::EmitJitRoots(uint8_t* code,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data) {
+ const uint8_t* roots_data,
+ /*out*/std::vector<Handle<mirror::Object>>* roots) {
code_generation_data_->EmitJitRoots(roots);
EmitJitRootPatches(code, roots_data);
}
-QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
- ScopedObjectAccess soa(Thread::Current());
- if (array_klass == nullptr) {
- // This can only happen for non-primitive arrays, as primitive arrays can always
- // be resolved.
- return kQuickAllocArrayResolved32;
- }
-
- switch (array_klass->GetComponentSize()) {
- case 1: return kQuickAllocArrayResolved8;
- case 2: return kQuickAllocArrayResolved16;
- case 4: return kQuickAllocArrayResolved32;
- case 8: return kQuickAllocArrayResolved64;
+QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
+ switch (new_array->GetComponentSizeShift()) {
+ case 0: return kQuickAllocArrayResolved8;
+ case 1: return kQuickAllocArrayResolved16;
+ case 2: return kQuickAllocArrayResolved32;
+ case 3: return kQuickAllocArrayResolved64;
}
LOG(FATAL) << "Unreachable";
- return kQuickAllocArrayResolved;
+ UNREACHABLE();
}
} // namespace art
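
The GetArrayAllocationEntrypoint change just above keys the entrypoint off the component size shift stored on HNewArray (log2 of the element size, see the new `component_size_shift` constructor argument in the test hunks) instead of resolving the array class and reading its component size, so codegen no longer needs a ScopedObjectAccess or a resolved class. The mapping is equivalent because size == 1 << shift; a quick self-contained sketch:

#include <cstdint>

enum QuickEntrypoint { kAllocArray8, kAllocArray16, kAllocArray32, kAllocArray64 };

// Old scheme: switch on the element size in bytes (1, 2, 4, 8).
// New scheme: switch on the size shift (0, 1, 2, 3); the same four cases,
// since component_size == 1u << component_size_shift.
QuickEntrypoint EntrypointForShift(uint32_t component_size_shift) {
  switch (component_size_shift) {
    case 0: return kAllocArray8;    // byte/boolean elements
    case 1: return kAllocArray16;   // char/short
    case 2: return kAllocArray32;   // int/float/32-bit heap references
    case 3: return kAllocArray64;   // long/double
  }
  __builtin_unreachable();
}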
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e77d621b58..f70ecb612d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -59,7 +59,6 @@ static constexpr ReadBarrierOption kCompilerReadBarrierOption =
class Assembler;
class CodeGenerator;
-class CompilerDriver;
class CompilerOptions;
class StackMapStream;
class ParallelMoveResolver;
@@ -350,14 +349,14 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
void AddSlowPath(SlowPathCode* slow_path);
- ScopedArenaVector<uint8_t> BuildStackMaps(const DexFile::CodeItem* code_item_for_osr_check);
+ ScopedArenaVector<uint8_t> BuildStackMaps(const dex::CodeItem* code_item_for_osr_check);
size_t GetNumberOfJitRoots() const;
// Fills the `literals` array with literals collected during code generation.
// Also emits literal patches.
void EmitJitRoots(uint8_t* code,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data)
+ const uint8_t* roots_data,
+ /*out*/std::vector<Handle<mirror::Object>>* roots)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLeafMethod() const {
@@ -622,7 +621,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// otherwise return a fall-back info that should be used instead.
virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) = 0;
+ ArtMethod* method) = 0;
// Generate a call to a static or direct method.
virtual void GenerateStaticOrDirectCall(
@@ -636,7 +635,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
virtual void GenerateNop() = 0;
- static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
+ static QuickEntrypointEnum GetArrayAllocationEntrypoint(HNewArray* new_array);
protected:
// Patch info used for recording locations of required linker patches and their targets,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d56f7aaca1..ff99a3eff2 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -168,8 +168,8 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
LocationSummary* locations,
int64_t spill_offset,
bool is_save) {
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills,
codegen->GetNumberOfCoreRegisters(),
fp_spills,
@@ -212,7 +212,7 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
@@ -224,7 +224,7 @@ void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummar
stack_offset += kXRegSizeInBytes;
}
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -234,13 +234,13 @@ void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummar
SaveRestoreLiveRegistersHelper(codegen,
locations,
- codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
+ codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ true);
}
void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
SaveRestoreLiveRegistersHelper(codegen,
locations,
- codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
+ codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ false);
}
class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
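
Context for the register save/restore hunks around here: GetSlowPathSpills returns a bitmask of registers live across the slow path, and LowToHighBits walks its set bits from the lowest register number up, assigning consecutive stack slots. An illustrative, self-contained version of that walk (not the VIXL code):

#include <cstdint>
#include <cstdio>

// Visit the set bits of `spills` from lowest to highest, the way the
// LowToHighBits iteration assigns stack slots to spilled registers.
void WalkSpills(uint32_t spills, size_t first_slot_offset, size_t slot_size) {
  size_t offset = first_slot_offset;
  while (spills != 0u) {
    uint32_t reg = __builtin_ctz(spills);  // index of the lowest set bit
    std::printf("save r%u at sp + %zu\n", reg, offset);
    offset += slot_size;
    spills &= spills - 1u;                 // clear that bit, move to the next
  }
}
// WalkSpills(0b10110, 0, 8) saves r1, r2, r4 at offsets 0, 8, 16 -- and the
// restore path must walk the mask in the same order to read the same slots.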
@@ -885,7 +885,8 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetAllocator(), this),
- assembler_(graph->GetAllocator()),
+ assembler_(graph->GetAllocator(),
+ compiler_options.GetInstructionSetFeatures()->AsArm64InstructionSetFeatures()),
uint32_literals_(std::less<uint32_t>(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
@@ -925,7 +926,7 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
uint32_t encoded_data = entry.first;
vixl::aarch64::Label* slow_path_entry = &entry.second.label;
__ Bind(slow_path_entry);
- CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+ CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
}
// Ensure we emit the literal pool.
@@ -1117,7 +1118,7 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
}
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void CodeGeneratorARM64::GenerateFrameExit() {
@@ -1205,6 +1206,7 @@ void CodeGeneratorARM64::SetupBlockedRegisters() const {
// mr : Runtime reserved.
// ip1 : VIXL core temp.
// ip0 : VIXL core temp.
+ // x18 : Platform register.
//
// Blocked fp registers:
// d31 : VIXL fp temp.
@@ -1213,6 +1215,7 @@ void CodeGeneratorARM64::SetupBlockedRegisters() const {
while (!reserved_core_registers.IsEmpty()) {
blocked_core_registers_[reserved_core_registers.PopLowestIndex().GetCode()] = true;
}
+ blocked_core_registers_[X18] = true;
CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
while (!reserved_fp_registers.IsEmpty()) {
@@ -1885,7 +1888,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
base,
offset,
maybe_temp,
- /* needs_null_check */ true,
+ /* needs_null_check= */ true,
field_info.IsVolatile());
} else {
// General case.
@@ -1894,7 +1897,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
// CodeGeneratorARM64::LoadAcquire call.
// NB: LoadAcquire will record the pc info if needed.
codegen_->LoadAcquire(
- instruction, OutputCPURegister(instruction), field, /* needs_null_check */ true);
+ instruction, OutputCPURegister(instruction), field, /* needs_null_check= */ true);
} else {
// Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -1949,7 +1952,7 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
if (field_info.IsVolatile()) {
codegen_->StoreRelease(
- instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check */ true);
+ instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check= */ true);
} else {
// Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -2317,9 +2320,10 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
if (offset >= kReferenceLoadMinFarOffset) {
locations->AddTemp(FixedTempLocation());
}
- } else {
+ } else if (!instruction->GetArray()->IsIntermediateAddress()) {
// We need a non-scratch temporary for the array data pointer in
- // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier().
+ // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier() for the case with no
+ // intermediate address.
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -2349,11 +2353,12 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
- // The read barrier instrumentation of object ArrayGet instructions
+ // The non-Baker read barrier instrumentation of object ArrayGet instructions
// does not support the HIntermediateAddress instruction.
DCHECK(!((type == DataType::Type::kReference) &&
instruction->GetArray()->IsIntermediateAddress() &&
- kEmitCompilerReadBarrier));
+ kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier));
if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// Object ArrayGet with Baker's read barrier case.
@@ -2361,6 +2366,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
// CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
if (index.IsConstant()) {
+ DCHECK(!instruction->GetArray()->IsIntermediateAddress());
// Array load with a constant index can be treated as a field load.
offset += Int64FromLocation(index) << DataType::SizeShift(type);
Location maybe_temp =
@@ -2370,12 +2376,11 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
obj.W(),
offset,
maybe_temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
} else {
- Register temp = WRegisterFrom(locations->GetTemp(0));
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- out, obj.W(), offset, index, temp, /* needs_null_check */ false);
+ instruction, out, obj.W(), offset, index, /* needs_null_check= */ false);
}
} else {
// General case.
@@ -2424,8 +2429,8 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
if (kIsDebugBuild) {
- HIntermediateAddress* tmp = instruction->GetArray()->AsIntermediateAddress();
- DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset);
+ HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+ DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset);
}
temp = obj;
} else {
@@ -2537,8 +2542,8 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
if (kIsDebugBuild) {
- HIntermediateAddress* tmp = instruction->GetArray()->AsIntermediateAddress();
- DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+ HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+ DCHECK(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
}
temp = array;
} else {
@@ -2920,7 +2925,7 @@ void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperati
int64_t magic;
int shift;
CalculateMagicAndShiftForDivRem(
- imm, type == DataType::Type::kInt64 /* is_long */, &magic, &shift);
+ imm, /* is_long= */ type == DataType::Type::kInt64, &magic, &shift);
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireSameSizeAs(out);
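
CalculateMagicAndShiftForDivRem in the hunk above implements the classic Hacker's Delight transformation: a division by a compile-time constant becomes a multiply-high plus shifts. A worked, runnable sketch for divisor 7 (magic 0x92492493, shift 2) on 32-bit signed integers:

#include <cassert>
#include <cstdint>

// Signed 32-bit division by 7 without a divide instruction.
int32_t DivBy7(int32_t n) {
  int64_t product = static_cast<int64_t>(static_cast<int32_t>(0x92492493)) * n;
  int32_t q = static_cast<int32_t>(product >> 32);  // multiply-high
  q += n;                    // correction because this magic constant is negative
  q >>= 2;                   // the "shift" computed alongside the magic number
  q += static_cast<uint32_t>(q) >> 31;  // round toward zero for negative n
  return q;
}

int main() {
  for (int32_t n : {-2147483647, -7, -3, 0, 6, 7, 100, 2147483647}) {
    assert(DivBy7(n) == n / 7);
  }
}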
@@ -3042,7 +3047,7 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction
if (!DataType::IsIntegralType(type)) {
LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
- return;
+ UNREACHABLE();
}
if (value.IsConstant()) {
@@ -3111,7 +3116,7 @@ void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* s
}
if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
if (!codegen_->GoesToNextBlock(block, successor)) {
__ B(codegen_->GetLabelOf(successor));
@@ -3261,7 +3266,7 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
if (codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor)) {
false_target = nullptr;
}
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -3280,9 +3285,9 @@ void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCodeARM64* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -3622,7 +3627,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -3654,7 +3659,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -3947,7 +3952,7 @@ void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
@@ -4017,7 +4022,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4053,7 +4058,7 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codege
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
// On ARM64 we support all dispatch types.
return desired_dispatch_info;
}
@@ -4196,7 +4201,7 @@ void LocationsBuilderARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
codegen_->GenerateInvokePolymorphicCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -4205,21 +4210,21 @@ void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
void InstructionCodeGeneratorARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
codegen_->GenerateInvokeCustomCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageIntrinsicPatch(
uint32_t intrinsic_data,
vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
+ /* dex_file= */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
+ /* dex_file= */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch(
@@ -4303,7 +4308,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLitera
ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
- [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
@@ -4311,7 +4316,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral
ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
- [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
}
void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label,
@@ -4350,7 +4355,7 @@ void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
// Add ADD with its PC-relative type patch.
vixl::aarch64::Label* add_label = NewBootImageIntrinsicPatch(boot_image_reference, adrp_label);
EmitAddPlaceholder(add_label, reg.X(), reg.X());
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
// Add ADRP with its PC-relative .data.bimg.rel.ro patch.
vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_reference);
EmitAdrpPlaceholder(adrp_label, reg.X());
@@ -4508,7 +4513,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
@@ -4521,12 +4526,12 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
@@ -4538,7 +4543,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
DCHECK(!codegen_->IsLeafMethod());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
@@ -4606,7 +4611,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -4628,7 +4633,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
out_loc,
current_method,
ArtMethod::DeclaringClassOffset().Int32Value(),
- /* fixup_label */ nullptr,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
@@ -4691,8 +4696,8 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
codegen_->GenerateGcRootFieldLoad(cls,
out_loc,
out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
+ /* offset= */ 0,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
@@ -4716,7 +4721,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
} else {
__ Bind(slow_path->GetExitLabel());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
}
@@ -4854,7 +4859,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -4870,8 +4875,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
codegen_->GenerateGcRootFieldLoad(load,
out_loc,
out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
+ /* offset= */ 0,
+ /* fixup_label= */ nullptr,
kCompilerReadBarrierOption);
return;
}
@@ -4885,7 +4890,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
__ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
@@ -4913,7 +4918,7 @@ void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* ins
} else {
CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitMul(HMul* mul) {
@@ -5004,13 +5009,11 @@ void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -5024,7 +5027,7 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
@@ -5499,7 +5502,7 @@ void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction
return;
}
GenerateSuspendCheck(instruction, nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -5712,8 +5715,8 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister(
out_reg,
offset,
maybe_temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -5753,8 +5756,8 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(
obj_reg,
offset,
maybe_temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -5839,7 +5842,7 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad(
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void CodeGeneratorARM64::GenerateUnsafeCasOldValueMovWithBakerReadBarrier(
@@ -5928,7 +5931,7 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
}
__ bind(&return_address);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
}
void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -5957,11 +5960,11 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
instruction, ref, obj, src, needs_null_check, use_load_acquire);
}
-void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
+void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
+ Location ref,
Register obj,
uint32_t data_offset,
Location index,
- Register temp,
bool needs_null_check) {
DCHECK(kEmitCompilerReadBarrier);
DCHECK(kUseBakerReadBarrier);
@@ -6000,9 +6003,24 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
DCHECK(temps.IsAvailable(ip0));
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
+
+ Register temp;
+ if (instruction->GetArray()->IsIntermediateAddress()) {
+ // We do not need to compute the intermediate address from the array: the
+ // input instruction has done it already. See the comment in
+ // `TryExtractArrayAccessAddress()`.
+ if (kIsDebugBuild) {
+ HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+ DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+ }
+ temp = obj;
+ } else {
+ temp = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
+ __ Add(temp.X(), obj.X(), Operand(data_offset));
+ }
+
uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
- __ Add(temp.X(), obj.X(), Operand(data_offset));
{
ExactAssemblyScope guard(GetVIXLAssembler(),
(kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
@@ -6021,7 +6039,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
}
__ bind(&return_address);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
}
void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
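
The GenerateArrayLoadWithBakerReadBarrier rework above moves the `temp = array + data_offset` computation inside the helper and skips it entirely when the array input is already an HIntermediateAddress, i.e. when instruction simplification (TryExtractArrayAccessAddress) has hoisted the base+offset addition so several accesses can share it. Roughly the strength reduction being exploited, in illustrative C++ (the assembly comments are schematic):

#include <cstddef>
#include <cstdint>

// An array object is a header followed by its elements:
//   data = object_base + data_offset; element i lives at data + (i << shift).
// Without HIntermediateAddress, every access re-adds data_offset:
//   add tmp, array, #data_offset ; ldr out, [tmp, index, lsl #shift]   (per load)
// With it, one shared add feeds all the indexed loads:
int32_t SumThree(uintptr_t array_base, size_t data_offset, const size_t* idx) {
  const int32_t* data =
      reinterpret_cast<const int32_t*>(array_base + data_offset);  // hoisted once
  return data[idx[0]] + data[idx[1]] + data[idx[2]];  // all loads reuse `data`
}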
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 2e7a20b553..ada5742fc0 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -557,7 +557,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
@@ -694,11 +694,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
bool use_load_acquire);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference array load when Baker's read barriers are used.
- void GenerateArrayLoadWithBakerReadBarrier(Location ref,
+ void GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
+ Location ref,
vixl::aarch64::Register obj,
uint32_t data_offset,
Location index,
- vixl::aarch64::Register temp,
bool needs_null_check);
// Emit code checking the status of the Marking Register, and
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 3580975c62..8204f1eecb 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -319,7 +319,7 @@ void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen, LocationSumm
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
size_t orig_offset = stack_offset;
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
@@ -334,7 +334,7 @@ void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen, LocationSumm
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
arm_codegen->GetAssembler()->StoreRegisterList(core_spills, orig_offset);
- uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
orig_offset = stack_offset;
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -357,7 +357,7 @@ void SlowPathCodeARMVIXL::RestoreLiveRegisters(CodeGenerator* codegen, LocationS
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
size_t orig_offset = stack_offset;
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -368,7 +368,7 @@ void SlowPathCodeARMVIXL::RestoreLiveRegisters(CodeGenerator* codegen, LocationS
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
arm_codegen->GetAssembler()->LoadRegisterList(core_spills, orig_offset);
- uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
while (fp_spills != 0u) {
uint32_t begin = CTZ(fp_spills);
uint32_t tmp = fp_spills + (1u << begin);
@@ -1037,26 +1037,26 @@ static uint32_t ComputeSRegisterListMask(const SRegisterList& regs) {
size_t CodeGeneratorARMVIXL::SaveCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
uint32_t reg_id ATTRIBUTE_UNUSED) {
TODO_VIXL32(FATAL);
- return 0;
+ UNREACHABLE();
}
// Restores the register from the stack. Returns the size taken on stack.
size_t CodeGeneratorARMVIXL::RestoreCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
uint32_t reg_id ATTRIBUTE_UNUSED) {
TODO_VIXL32(FATAL);
- return 0;
+ UNREACHABLE();
}
size_t CodeGeneratorARMVIXL::SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
uint32_t reg_id ATTRIBUTE_UNUSED) {
TODO_VIXL32(FATAL);
- return 0;
+ UNREACHABLE();
}
size_t CodeGeneratorARMVIXL::RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
uint32_t reg_id ATTRIBUTE_UNUSED) {
TODO_VIXL32(FATAL);
- return 0;
+ UNREACHABLE();
}
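// Aside: the `return 0;` -> UNREACHABLE() changes above replace dead code
// after a fatal log with an explicit marker. A stand-in sketch (not ART's
// actual macros) of the pattern:
#include <cstdio>
#include <cstdlib>

[[noreturn]] void FatalLog(const char* msg) {
  std::fprintf(stderr, "FATAL: %s\n", msg);
  std::abort();
}

size_t SaveCoreRegisterStub() {
  FatalLog("unimplemented");
  // Control never returns from FatalLog; telling the compiler so avoids
  // handing callers a bogus 0 and silences missing-return warnings.
  __builtin_unreachable();
}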
static void GenerateDataProcInstruction(HInstruction::InstructionKind kind,
@@ -1539,7 +1539,7 @@ static void GenerateConditionGeneric(HCondition* cond, CodeGeneratorARMVIXL* cod
vixl32::Label done_label;
vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
- __ B(condition.second, final_label, /* far_target */ false);
+ __ B(condition.second, final_label, /* is_far_target= */ false);
__ Mov(out, 1);
if (done_label.IsReferenced()) {
@@ -1934,7 +1934,7 @@ void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
uint32_t encoded_data = entry.first;
vixl::aarch32::Label* slow_path_entry = &entry.second.label;
__ Bind(slow_path_entry);
- CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+ CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
}
GetAssembler()->FinalizeCode();
@@ -2159,7 +2159,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag());
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 1);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 1);
}
void CodeGeneratorARMVIXL::GenerateFrameExit() {
@@ -2268,7 +2268,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(DataType::Typ
case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
- break;
+ UNREACHABLE();
}
return Location::NoLocation();
}
@@ -2427,7 +2427,7 @@ void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock*
}
if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 2);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 2);
}
if (!codegen_->GoesToNextBlock(block, successor)) {
__ B(codegen_->GetLabelOf(successor));
@@ -2606,7 +2606,7 @@ void InstructionCodeGeneratorARMVIXL::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
vixl32::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -2625,9 +2625,9 @@ void InstructionCodeGeneratorARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCodeARMVIXL* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARMVIXL>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -2677,6 +2677,18 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
const Location first = locations->InAt(0);
const Location out = locations->Out();
const Location second = locations->InAt(1);
+
+ // In the unlucky case where the output of this instruction overlaps
+ // with an input of an "emitted-at-use-site" condition, and
+ // the output of this instruction is not one of its inputs, we
+ // need to fall back to branches instead of conditional ARM instructions.
+ bool output_overlaps_with_condition_inputs =
+ !IsBooleanValueOrMaterializedCondition(condition) &&
+ !out.Equals(first) &&
+ !out.Equals(second) &&
+ (condition->GetLocations()->InAt(0).Equals(out) ||
+ condition->GetLocations()->InAt(1).Equals(out));
+ DCHECK(!output_overlaps_with_condition_inputs || condition->IsCondition());
Location src;
if (condition->IsIntConstant()) {
@@ -2690,7 +2702,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
return;
}
- if (!DataType::IsFloatingPointType(type)) {
+ if (!DataType::IsFloatingPointType(type) && !output_overlaps_with_condition_inputs) {
bool invert = false;
if (out.Equals(second)) {
@@ -2762,6 +2774,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
vixl32::Label* false_target = nullptr;
vixl32::Label* true_target = nullptr;
vixl32::Label select_end;
+ vixl32::Label other_case;
vixl32::Label* const target = codegen_->GetFinalLabel(select, &select_end);
if (out.Equals(second)) {
@@ -2772,12 +2785,21 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
src = second;
if (!out.Equals(first)) {
- codegen_->MoveLocation(out, first, type);
+ if (output_overlaps_with_condition_inputs) {
+ false_target = &other_case;
+ } else {
+ codegen_->MoveLocation(out, first, type);
+ }
}
}
- GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target */ false);
+ GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target= */ false);
codegen_->MoveLocation(out, src, type);
+ if (output_overlaps_with_condition_inputs) {
+ __ B(target);
+ __ Bind(&other_case);
+ codegen_->MoveLocation(out, first, type);
+ }
if (select_end.IsReferenced()) {
__ Bind(&select_end);
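// Aside: a scalar analogy (not ART code) for the fallback above. The
// conditional-move lowering copies one input into `out` before the compare;
// if `out` aliased a compare operand, that copy would clobber it, so the
// branch form evaluates the condition first.
int SelectCmovStyle(int a, int b, int x, int y) {
  int out = x;         // unconditional move into `out`...
  if (a < b) out = y;  // ...then a conditional overwrite.
  // Had `out` shared a register with `a` or `b`, the first move would have
  // destroyed a compare operand; with branches, the compare happens first.
  return out;
}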
@@ -2876,31 +2898,16 @@ void CodeGeneratorARMVIXL::GenerateConditionWithZero(IfCondition condition,
void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
- // Handle the long/FP comparisons made in instruction simplification.
- switch (cond->InputAt(0)->GetType()) {
- case DataType::Type::kInt64:
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
- if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
- break;
-
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
- if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
- break;
-
- default:
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
- if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
+ const DataType::Type type = cond->InputAt(0)->GetType();
+ if (DataType::IsFloatingPointType(type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ }
+ if (!cond->IsEmittedAtUseSite()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
}
@@ -3128,7 +3135,7 @@ void LocationsBuilderARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
void InstructionCodeGeneratorARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 3);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 3);
}
void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
@@ -3159,7 +3166,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrD
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 4);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 4);
return;
}
@@ -3167,7 +3174,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrD
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 5);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 5);
}
void LocationsBuilderARMVIXL::HandleInvoke(HInvoke* invoke) {
@@ -3186,14 +3193,14 @@ void LocationsBuilderARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) {
void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) {
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 6);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 6);
return;
}
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 7);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 7);
}
void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -3271,7 +3278,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
DCHECK(!codegen_->IsLeafMethod());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 8);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 8);
}
void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
@@ -3280,7 +3287,7 @@ void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke)
void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
codegen_->GenerateInvokePolymorphicCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 9);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 9);
}
void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -3289,7 +3296,7 @@ void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
void InstructionCodeGeneratorARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
codegen_->GenerateInvokeCustomCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 10);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 10);
}
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
@@ -4006,7 +4013,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOpera
int64_t magic;
int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
// TODO(VIXL): Change the static cast to Operand::From() after VIXL is fixed.
__ Mov(temp1, static_cast<int32_t>(magic));
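// Aside: a self-contained sketch of the magic-number technique behind
// CalculateMagicAndShiftForDivRem; the constants are the Hacker's Delight
// values for a signed 32-bit divide by 7 (assumes arithmetic right shift).
#include <cassert>
#include <cstdint>

int32_t DivideBy7(int32_t n) {
  const int32_t magic = static_cast<int32_t>(0x92492493);  // M for d == 7
  const int shift = 2;
  int32_t q = static_cast<int32_t>((static_cast<int64_t>(magic) * n) >> 32);
  q += n;                               // M is negative, so add the dividend
  q >>= shift;
  q += static_cast<uint32_t>(q) >> 31;  // truncate toward zero for n < 0
  return q;
}

int main() {
  for (int32_t n : {-100, -14, -7, -1, 0, 1, 6, 7, 13, 14, 100}) {
    assert(DivideBy7(n) == n / 7);
  }
}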
@@ -4414,7 +4421,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxFloat(HInstruction* minmax,
__ Vcmp(op1, op2);
__ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- __ B(vs, &nan, /* far_target */ false); // if un-ordered, go to NaN handling.
+ __ B(vs, &nan, /* is_far_target= */ false); // if un-ordered, go to NaN handling.
// op1 <> op2
vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4426,7 +4433,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxFloat(HInstruction* minmax,
__ vmov(cond, F32, out, op2);
}
// for <>(not equal), we've done min/max calculation.
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
// handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
__ Vmov(temp1, op1);
@@ -4471,7 +4478,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxDouble(HInstruction* minmax,
__ Vcmp(op1, op2);
__ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- __ B(vs, &handle_nan_eq, /* far_target */ false); // if un-ordered, go to NaN handling.
+ __ B(vs, &handle_nan_eq, /* is_far_target= */ false); // if un-ordered, go to NaN handling.
// op1 <> op2
vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4483,7 +4490,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxDouble(HInstruction* minmax,
__ vmov(cond, F64, out, op2);
}
// for <>(not equal), we've done min/max calculation.
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
// handle op1 == op2, max(+0.0,-0.0).
if (!is_min) {
@@ -4707,7 +4714,7 @@ void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) {
__ And(shift_right, RegisterFrom(rhs), 0x1F);
__ Lsrs(shift_left, RegisterFrom(rhs), 6);
__ Rsb(LeaveFlags, shift_left, shift_right, Operand::From(kArmBitsPerWord));
- __ B(cc, &shift_by_32_plus_shift_right, /* far_target */ false);
+ __ B(cc, &shift_by_32_plus_shift_right, /* is_far_target= */ false);
// out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
// out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
@@ -4964,7 +4971,7 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) {
__ Rrx(o_l, low);
}
} else {
- DCHECK(2 <= shift_value && shift_value < 32) << shift_value;
+ DCHECK(0 <= shift_value && shift_value < 32) << shift_value;
if (op->IsShl()) {
__ Lsl(o_h, high, shift_value);
__ Orr(o_h, o_h, Operand(low, ShiftType::LSR, 32 - shift_value));
@@ -5023,7 +5030,7 @@ void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction) {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 11);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 11);
}
void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
@@ -5036,14 +5043,12 @@ void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 12);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 12);
}
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
@@ -5165,8 +5170,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) {
}
case DataType::Type::kInt64: {
__ Cmp(HighRegisterFrom(left), HighRegisterFrom(right)); // Signed compare.
- __ B(lt, &less, /* far_target */ false);
- __ B(gt, &greater, /* far_target */ false);
+ __ B(lt, &less, /* is_far_target= */ false);
+ __ B(gt, &greater, /* is_far_target= */ false);
// Emit move to `out` before the last `Cmp`, as `Mov` might affect the status flags.
__ Mov(out, 0);
__ Cmp(LowRegisterFrom(left), LowRegisterFrom(right)); // Unsigned compare.
@@ -5187,8 +5192,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) {
UNREACHABLE();
}
- __ B(eq, final_label, /* far_target */ false);
- __ B(less_cond, &less, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
+ __ B(less_cond, &less, /* is_far_target= */ false);
__ Bind(&greater);
__ Mov(out, 1);
@@ -5603,7 +5608,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
// Note that a potential implicit null check is handled in this
// CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, base, offset, maybe_temp, /* needs_null_check */ true);
+ instruction, out, base, offset, maybe_temp, /* needs_null_check= */ true);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5959,7 +5964,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- __ B(cs, &uncompressed_load, /* far_target */ false);
+ __ B(cs, &uncompressed_load, /* is_far_target= */ false);
GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
RegisterFrom(out_loc),
obj,
@@ -6001,7 +6006,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- __ B(cs, &uncompressed_load, /* far_target */ false);
+ __ B(cs, &uncompressed_load, /* is_far_target= */ false);
__ Ldrb(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 0));
__ B(final_label);
__ Bind(&uncompressed_load);
@@ -6041,11 +6046,11 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
obj,
data_offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
Location temp = locations->GetTemp(0);
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- out_loc, obj, data_offset, index, temp, /* needs_null_check */ false);
+ out_loc, obj, data_offset, index, temp, /* needs_null_check= */ false);
}
} else {
vixl32::Register out = OutputRegister(instruction);
@@ -6320,7 +6325,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
if (instruction->StaticTypeOfArrayIsObjectArray()) {
vixl32::Label do_put;
- __ B(eq, &do_put, /* far_target */ false);
+ __ B(eq, &do_put, /* is_far_target= */ false);
// If heap poisoning is enabled, the `temp1` reference has
// not been unpoisoned yet; unpoison it now.
GetAssembler()->MaybeUnpoisonHeapReference(temp1);
@@ -6622,7 +6627,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSuspendCheck(HSuspendCheck* instructi
return;
}
GenerateSuspendCheck(instruction, nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 13);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 13);
}
void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction,
@@ -6970,7 +6975,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 14);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 14);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -7009,14 +7014,14 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
codegen_->EmitMovwMovtPlaceholder(labels, out);
- __ Ldr(out, MemOperand(out, /* offset */ 0));
+ __ Ldr(out, MemOperand(out, /* offset= */ 0));
break;
}
case HLoadClass::LoadKind::kBssEntry: {
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
- codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -7032,7 +7037,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
cls->GetTypeIndex(),
cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
- codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
break;
}
case HLoadClass::LoadKind::kRuntimeCall:
@@ -7054,7 +7059,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
} else {
__ Bind(slow_path->GetExitLabel());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 15);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 15);
}
}
@@ -7235,7 +7240,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
codegen_->EmitMovwMovtPlaceholder(labels, out);
- __ Ldr(out, MemOperand(out, /* offset */ 0));
+ __ Ldr(out, MemOperand(out, /* offset= */ 0));
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -7244,13 +7249,13 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 16);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 16);
return;
}
case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -7265,7 +7270,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
return;
}
default:
@@ -7278,7 +7283,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
__ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 17);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 17);
}
static int32_t GetExceptionTlsOffset() {
@@ -7410,7 +7415,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
if (instruction->MustDoNullCheck()) {
DCHECK(!out.Is(obj));
__ Mov(out, 0);
- __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
}
switch (type_check_kind) {
@@ -7442,7 +7447,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
__ it(eq);
__ mov(eq, out, 1);
} else {
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
__ Mov(out, 1);
}
@@ -7470,9 +7475,9 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
maybe_temp_loc,
read_barrier_option);
// If `out` is null, we use it for the result, and jump to the final label.
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
__ Cmp(out, cls);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
__ Mov(out, 1);
break;
}
@@ -7491,7 +7496,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
vixl32::Label loop, success;
__ Bind(&loop);
__ Cmp(out, cls);
- __ B(eq, &success, /* far_target */ false);
+ __ B(eq, &success, /* is_far_target= */ false);
// /* HeapReference<Class> */ out = out->super_class_
GenerateReferenceLoadOneRegister(instruction,
out_loc,
@@ -7501,7 +7506,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
// This is essentially a null check, but it sets the condition flags to the
// proper value for the code that follows the loop, i.e. not `eq`.
__ Cmp(out, 1);
- __ B(hs, &loop, /* far_target */ false);
+ __ B(hs, &loop, /* is_far_target= */ false);
// Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
// we check that the output is in a low register, so that a 16-bit MOV
@@ -7546,7 +7551,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
// Do an exact check.
vixl32::Label exact_check;
__ Cmp(out, cls);
- __ B(eq, &exact_check, /* far_target */ false);
+ __ B(eq, &exact_check, /* is_far_target= */ false);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ out = out->component_type_
GenerateReferenceLoadOneRegister(instruction,
@@ -7555,7 +7560,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
maybe_temp_loc,
read_barrier_option);
// If `out` is null, we use it for the result, and jump to the final label.
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
__ Cmp(out, 0);
@@ -7577,7 +7582,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
__ it(eq);
__ mov(eq, out, 1);
} else {
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
__ Bind(&exact_check);
__ Mov(out, 1);
}
@@ -7597,7 +7602,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -7626,7 +7631,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7711,7 +7716,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
}
switch (type_check_kind) {
@@ -7758,7 +7763,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
// Otherwise, compare the classes.
__ Cmp(temp, cls);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
break;
}
@@ -7775,7 +7780,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
vixl32::Label loop;
__ Bind(&loop);
__ Cmp(temp, cls);
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction,
@@ -7803,7 +7808,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
// Do an exact check.
__ Cmp(temp, cls);
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
@@ -7867,7 +7872,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
__ Sub(RegisterFrom(maybe_temp2_loc), RegisterFrom(maybe_temp2_loc), 2);
// Compare the classes and continue the loop if they do not match.
__ Cmp(cls, RegisterFrom(maybe_temp3_loc));
- __ B(ne, &start_loop, /* far_target */ false);
+ __ B(ne, &start_loop, /* is_far_target= */ false);
break;
}
@@ -7908,7 +7913,7 @@ void InstructionCodeGeneratorARMVIXL::VisitMonitorOperation(HMonitorOperation* i
} else {
CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 18);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 18);
}
void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) {
@@ -8263,7 +8268,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, out_reg, offset, maybe_temp, /* needs_null_check */ false);
+ instruction, out, out_reg, offset, maybe_temp, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -8298,7 +8303,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check */ false);
+ instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -8379,7 +8384,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 19);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 19);
}
void CodeGeneratorARMVIXL::GenerateUnsafeCasOldValueAddWithBakerReadBarrier(
@@ -8479,7 +8484,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
: BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 20, /* temp_loc= */ LocationFrom(ip));
}
void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -8567,7 +8572,7 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref,
DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 21, /* temp_loc= */ LocationFrom(ip));
}
void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
@@ -8650,7 +8655,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruct
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -8810,12 +8815,12 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageIntrinsicPatch(
uint32_t intrinsic_data) {
- return NewPcRelativePatch(/* dex_file */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
+ return NewPcRelativePatch(/* dex_file= */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch(
uint32_t boot_image_offset) {
- return NewPcRelativePatch(/* dex_file */ nullptr,
+ return NewPcRelativePatch(/* dex_file= */ nullptr,
boot_image_offset,
&boot_image_method_patches_);
}
@@ -8886,7 +8891,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+ return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
});
}
@@ -8897,7 +8902,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFil
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+ return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
});
}
@@ -8907,11 +8912,11 @@ void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg,
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
NewBootImageIntrinsicPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
NewBootImageRelRoPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
- __ Ldr(reg, MemOperand(reg, /* offset */ 0));
+ __ Ldr(reg, MemOperand(reg, /* offset= */ 0));
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -9056,7 +9061,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
return map->GetOrCreate(
value,
[this, value]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ value);
+ return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ value);
});
}
@@ -9283,9 +9288,9 @@ void CodeGeneratorARMVIXL::EmitMovwMovtPlaceholder(
CodeBufferCheckScope::kMaximumSize);
// TODO(VIXL): Think about using mov instead of movw.
__ bind(&labels->movw_label);
- __ movw(out, /* placeholder */ 0u);
+ __ movw(out, /* operand= */ 0u);
__ bind(&labels->movt_label);
- __ movt(out, /* placeholder */ 0u);
+ __ movt(out, /* operand= */ 0u);
__ bind(&labels->add_pc_label);
__ add(out, out, pc);
}
@@ -9308,7 +9313,7 @@ static void EmitGrayCheckAndFastPath(ArmVIXLAssembler& assembler,
static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
__ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
- __ B(ne, slow_path, /* is_far_target */ false);
+ __ B(ne, slow_path, /* is_far_target= */ false);
// To throw NPE, we return to the fast path; the artificial dependence below does not matter.
if (throw_npe != nullptr) {
__ Bind(throw_npe);
@@ -9355,7 +9360,7 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
vixl32::Label* throw_npe = nullptr;
if (GetCompilerOptions().GetImplicitNullChecks() && holder_reg.Is(base_reg)) {
throw_npe = &throw_npe_label;
- __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target */ false);
+ __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target= */ false);
}
// Check if the holder is gray and, if not, add fake dependency to the base register
// and return to the LDR instruction to load the reference. Otherwise, use introspection
@@ -9432,7 +9437,7 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
temps.Exclude(ip);
vixl32::Label return_label, not_marked, forwarding_address;
- __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
+ __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target= */ false);
MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
__ Ldr(ip, lock_word);
__ Tst(ip, LockWord::kMarkBitStateMaskShifted);
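// Aside: a conceptual sketch (illustrative only; the mask below is not ART's
// real LockWord layout) of the gray check these introspection thunks perform:
// only a gray holder forces the mark slow path, otherwise the loaded
// reference is used as-is.
#include <cstdint>

struct Object { uint32_t lock_word = 0; };

template <typename T> T* MarkReference(T* ref) { return ref; }  // stand-in

constexpr uint32_t kGrayBit = 1u << 28;  // hypothetical gray-state bit

template <typename T>
T* BakerLoad(const Object* holder, T* loaded_ref) {
  if ((holder->lock_word & kGrayBit) == 0) {
    return loaded_ref;               // fast path: not gray, reference is valid
  }
  return MarkReference(loaded_ref);  // slow path: mark through the collector
}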
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 33502d4f68..5edca87147 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -547,7 +547,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index d74a7a760f..f7f37db26a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -147,7 +147,7 @@ Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(DataType::Type t
case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
- break;
+ UNREACHABLE();
}
// Space on the stack is reserved for all arguments.
@@ -587,7 +587,7 @@ class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS {
mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
instruction_,
this,
- /* direct */ false);
+ /* direct= */ false);
}
__ B(GetExitLabel());
}
@@ -681,7 +681,7 @@ class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS {
mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
instruction_,
this,
- /* direct */ false);
+ /* direct= */ false);
// If the new reference is different from the old reference,
// update the field in the holder (`*(obj_ + field_offset_)`).
@@ -1167,9 +1167,9 @@ void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
__ Move(r2_l, TMP);
__ Move(r2_h, AT);
} else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
- Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ false);
} else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
- Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ true);
} else if (loc1.IsSIMDStackSlot() && loc2.IsSIMDStackSlot()) {
ExchangeQuadSlots(loc1.GetStackIndex(), loc2.GetStackIndex());
} else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
@@ -1654,14 +1654,14 @@ CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageIntrinsic
uint32_t intrinsic_data,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+ /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+ /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageMethodPatch(
@@ -1737,7 +1737,7 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo
__ Bind(&info_high->label);
__ Bind(&info_high->pc_rel_label);
// Add the high half of a 32-bit offset to PC.
- __ Auipc(out, /* placeholder */ 0x1234);
+ __ Auipc(out, /* imm16= */ 0x1234);
__ SetReorder(reordering);
} else {
// If base is ZERO, emit NAL to obtain the actual base.
@@ -1746,7 +1746,7 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo
__ Nal();
}
__ Bind(&info_high->label);
- __ Lui(out, /* placeholder */ 0x1234);
+ __ Lui(out, /* imm16= */ 0x1234);
// If we emitted the NAL, bind the pc_rel_label, otherwise base is a register holding
// the HMipsComputeBaseMethodAddress which has its own label stored in MipsAssembler.
if (base == ZERO) {
@@ -1764,13 +1764,13 @@ void CodeGeneratorMIPS::LoadBootImageAddress(Register reg, uint32_t boot_image_r
if (GetCompilerOptions().IsBootImage()) {
PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
- EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base */ ZERO);
- __ Addiu(reg, TMP, /* placeholder */ 0x5678, &info_low->label);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base= */ ZERO);
+ __ Addiu(reg, TMP, /* imm16= */ 0x5678, &info_low->label);
+ } else if (GetCompilerOptions().GetCompilePic()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
- EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
- __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base= */ ZERO);
+ __ Lw(reg, reg, /* imm16= */ 0x5678, &info_low->label);
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1793,8 +1793,8 @@ void CodeGeneratorMIPS::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invo
PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx);
PcRelativePatchInfo* info_low =
NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
- EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base */ ZERO);
- __ Addiu(argument, argument, /* placeholder */ 0x5678, &info_low->label);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base= */ ZERO);
+ __ Addiu(argument, argument, /* imm16= */ 0x5678, &info_low->label);
} else {
LoadBootImageAddress(argument, boot_image_offset);
}
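// Aside: the 0x1234/0x5678 immediates above are placeholders patched later
// with the %hi/%lo halves of a 32-bit address. A sketch of the split
// (illustrative, not ART code): ADDIU sign-extends its 16-bit immediate, so
// the high half absorbs a carry when the low half is >= 0x8000.
#include <cassert>
#include <cstdint>

void SplitHiLo(uint32_t addr, uint16_t* hi, int16_t* lo) {
  *hi = static_cast<uint16_t>((addr + 0x8000u) >> 16);
  *lo = static_cast<int16_t>(addr & 0xFFFFu);
}

int main() {
  uint16_t hi;
  int16_t lo;
  SplitHiLo(0x1234ABCDu, &hi, &lo);
  // LUI(hi) followed by ADDIU(lo) reconstructs the original address.
  assert(static_cast<uint32_t>((hi << 16) + lo) == 0x1234ABCDu);
}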
@@ -2579,7 +2579,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Or(dst_high, dst_high, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(dst_high, dst_low);
__ Move(dst_low, ZERO);
} else {
@@ -2595,7 +2595,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Or(dst_low, dst_low, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(dst_low, dst_high);
__ Sra(dst_high, dst_high, 31);
} else {
@@ -2612,7 +2612,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Or(dst_low, dst_low, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(dst_low, dst_high);
__ Move(dst_high, ZERO);
} else {
@@ -2631,7 +2631,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Or(dst_high, dst_high, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(TMP, dst_high);
__ Move(dst_high, dst_low);
__ Move(dst_low, TMP);
@@ -2862,7 +2862,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
obj,
offset,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
out_loc,
@@ -2870,7 +2870,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
data_offset,
index,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
}
} else {
Register out = out_loc.AsRegister<Register>();
@@ -4104,7 +4104,7 @@ void InstructionCodeGeneratorMIPS::GenerateDivRemWithAnyConstant(HBinaryOperatio
int64_t magic;
int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
@@ -5948,7 +5948,7 @@ void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -5967,9 +5967,9 @@ void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCodeMIPS* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
// This function returns true if a conditional move can be generated for HSelect.
@@ -5983,7 +5983,7 @@ void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
// of common logic.
static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
HCondition* condition = cond->AsCondition();
DataType::Type cond_type =
@@ -6216,7 +6216,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) {
Location src = locations->InAt(1);
Register src_reg = ZERO;
Register src_reg_high = ZERO;
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
Register cond_reg = TMP;
int cond_cc = 0;
DataType::Type cond_type = DataType::Type::kInt32;
@@ -6224,7 +6224,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) {
DataType::Type dst_type = select->GetType();
if (IsBooleanValueOrMaterializedCondition(cond)) {
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+ cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
} else {
HCondition* condition = cond->AsCondition();
LocationSummary* cond_locations = cond->GetLocations();
@@ -6337,7 +6337,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
Location dst = locations->Out();
Location false_src = locations->InAt(0);
Location true_src = locations->InAt(1);
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
Register cond_reg = TMP;
FRegister fcond_reg = FTMP;
DataType::Type cond_type = DataType::Type::kInt32;
@@ -6345,7 +6345,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
DataType::Type dst_type = select->GetType();
if (IsBooleanValueOrMaterializedCondition(cond)) {
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+ cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
} else {
HCondition* condition = cond->AsCondition();
LocationSummary* cond_locations = cond->GetLocations();
@@ -6526,7 +6526,7 @@ void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
- if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) {
+ if (CanMoveConditionally(select, is_r6, /* locations_to_set= */ nullptr)) {
if (is_r6) {
GenConditionalMoveR6(select);
} else {
@@ -6536,8 +6536,8 @@ void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
LocationSummary* locations = select->GetLocations();
MipsLabel false_target;
GenerateTestAndBranch(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
+ /* condition_input_index= */ 2,
+ /* true_target= */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
@@ -6696,7 +6696,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
obj,
offset,
temp_loc,
- /* needs_null_check */ true);
+ /* needs_null_check= */ true);
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -6929,7 +6929,7 @@ void InstructionCodeGeneratorMIPS::GenerateReferenceLoadOneRegister(
out_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -6970,7 +6970,7 @@ void InstructionCodeGeneratorMIPS::GenerateReferenceLoadTwoRegisters(
obj_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7061,7 +7061,7 @@ void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruc
__ AddUpper(base, obj, offset_high);
}
MipsLabel skip_call;
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
if (label_low != nullptr) {
DCHECK(short_offset);
__ Bind(label_low);
@@ -7216,11 +7216,11 @@ void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* inst
MipsLabel skip_call;
if (short_offset) {
if (isR6) {
- __ Beqzc(T9, &skip_call, /* is_bare */ true);
+ __ Beqzc(T9, &skip_call, /* is_bare= */ true);
__ Nop(); // In forbidden slot.
__ Jialc(T9, thunk_disp);
} else {
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Addiu(T9, T9, thunk_disp); // In delay slot.
__ Jalr(T9);
__ Nop(); // In delay slot.
@@ -7228,13 +7228,13 @@ void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* inst
__ Bind(&skip_call);
} else {
if (isR6) {
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Aui(base, obj, offset_high); // In delay slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
} else {
__ Lui(base, offset_high);
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Addiu(T9, T9, thunk_disp); // In delay slot.
__ Jalr(T9);
__ Bind(&skip_call);
@@ -7311,7 +7311,7 @@ void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* inst
// We will not do the explicit null check in the thunk as some form of a null check
// must've been done earlier.
DCHECK(!needs_null_check);
- const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+ const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
// Loading the entrypoint does not require a load acquire since it is only changed when
// threads are suspended or running a checkpoint.
__ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
@@ -7321,13 +7321,13 @@ void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* inst
: index.AsRegister<Register>();
MipsLabel skip_call;
if (GetInstructionSetFeatures().IsR6()) {
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Lsa(TMP, index_reg, obj, scale_factor); // In delay slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
} else {
__ Sll(TMP, index_reg, scale_factor);
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Addiu(T9, T9, thunk_disp); // In delay slot.
__ Jalr(T9);
__ Bind(&skip_call);
@@ -7442,7 +7442,7 @@ void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
ref,
obj,
- /* field_offset */ index,
+ /* field_offset= */ index,
temp_reg);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
@@ -7705,7 +7705,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ Bne(out, cls.AsRegister<Register>(), slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -7734,7 +7734,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7964,7 +7964,7 @@ Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticO
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -8001,7 +8001,7 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
Register temp_reg = temp.AsRegister<Register>();
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
- __ Addiu(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ __ Addiu(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -8010,7 +8010,7 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
Register temp_reg = temp.AsRegister<Register>();
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
- __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -8020,7 +8020,7 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
Register temp_reg = temp.AsRegister<Register>();
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
- __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -8226,7 +8226,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -8239,7 +8239,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HLoadClass::LoadKind::kBssEntry: {
@@ -8253,7 +8253,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
GenerateGcRootFieldLoad(cls,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
read_barrier_option,
&info_low->label);
generate_null_check = true;
@@ -8278,12 +8278,12 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
cls->GetClass());
bool reordering = __ SetReorder(false);
__ Bind(&info->high_label);
- __ Lui(out, /* placeholder */ 0x1234);
+ __ Lui(out, /* imm16= */ 0x1234);
__ SetReorder(reordering);
GenerateGcRootFieldLoad(cls,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
read_barrier_option,
&info->low_label);
break;
@@ -8432,7 +8432,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
return;
}
case HLoadString::LoadKind::kBootImageRelRo: {
@@ -8445,7 +8445,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -8460,7 +8460,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
GenerateGcRootFieldLoad(load,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
@@ -8489,12 +8489,12 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
load->GetString());
bool reordering = __ SetReorder(false);
__ Bind(&info->high_label);
- __ Lui(out, /* placeholder */ 0x1234);
+ __ Lui(out, /* imm16= */ 0x1234);
__ SetReorder(reordering);
GenerateGcRootFieldLoad(load,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
kCompilerReadBarrierOption,
&info->low_label);
return;
@@ -8702,10 +8702,8 @@ void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes care
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index bf9589331b..50807310b6 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -563,7 +563,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7c89808d54..8b6328f097 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -953,7 +953,7 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
: CodeGenerator(graph,
kNumberOfGpuRegisters,
kNumberOfFpuRegisters,
- /* number_of_register_pairs */ 0,
+ /* number_of_register_pairs= */ 0,
ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
arraysize(kCoreCalleeSaves)),
ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
@@ -1581,14 +1581,14 @@ CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageIntri
uint32_t intrinsic_data,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+ /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+ /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageMethodPatch(
@@ -1665,7 +1665,7 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn
DCHECK(!info_high->patch_info_high);
__ Bind(&info_high->label);
// Add the high half of a 32-bit offset to PC.
- __ Auipc(out, /* placeholder */ 0x1234);
+ __ Auipc(out, /* imm16= */ 0x1234);
// A following instruction will add the sign-extended low half of the 32-bit
// offset to `out` (e.g. ld, jialc, daddiu).
if (info_low != nullptr) {
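// [Editor's sketch — not part of the patch.] The 0x1234/0x5678 immediates are
// placeholders, patched later with the two halves of the real 32-bit
// PC-relative offset. Because the consuming instruction sign-extends its
// 16-bit operand, the high half compensates, mirroring the idiom used
// elsewhere in this file:
//
//   int16_t lo = Low16Bits(offset);
//   int16_t hi = High16Bits(offset - lo);  // accounts for sign extension of lo
//   // auipc  out, hi         ; out = pc + (hi << 16)
//   // daddiu out, out, lo    ; out = pc + offset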
@@ -1679,13 +1679,13 @@ void CodeGeneratorMIPS64::LoadBootImageAddress(GpuRegister reg, uint32_t boot_im
PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(reg, AT, /* placeholder */ 0x5678);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ __ Daddiu(reg, AT, /* imm16= */ 0x5678);
+ } else if (GetCompilerOptions().GetCompilePic()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
- __ Lwu(reg, AT, /* placeholder */ 0x5678);
+ __ Lwu(reg, AT, /* imm16= */ 0x5678);
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1710,7 +1710,7 @@ void CodeGeneratorMIPS64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* in
PcRelativePatchInfo* info_low =
NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(argument, AT, /* placeholder */ 0x5678);
+ __ Daddiu(argument, AT, /* imm16= */ 0x5678);
} else {
LoadBootImageAddress(argument, boot_image_offset);
}
@@ -1724,7 +1724,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_fil
ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
}
Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
@@ -1733,7 +1733,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file
ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
}
void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
@@ -2458,7 +2458,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
obj,
offset,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
out_loc,
@@ -2466,7 +2466,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
data_offset,
index,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
}
} else {
GpuRegister out = out_loc.AsRegister<GpuRegister>();
@@ -3337,10 +3337,10 @@ void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) {
switch (type) {
default:
// Integer case.
- GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations);
+ GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ false, locations);
return;
case DataType::Type::kInt64:
- GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
+ GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ true, locations);
return;
case DataType::Type::kFloat32:
case DataType::Type::kFloat64:
@@ -3642,7 +3642,7 @@ void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instructio
if (!DataType::IsIntegralType(type)) {
LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
- return;
+ UNREACHABLE();
}
if (value.IsConstant()) {
@@ -4449,10 +4449,10 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
switch (type) {
default:
- GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target);
+ GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ false, locations, branch_target);
break;
case DataType::Type::kInt64:
- GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target);
+ GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ true, locations, branch_target);
break;
case DataType::Type::kFloat32:
case DataType::Type::kFloat64:
@@ -4482,7 +4482,7 @@ void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
Mips64Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -4501,9 +4501,9 @@ void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCodeMIPS64* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
// This function returns true if a conditional move can be generated for HSelect.
@@ -4517,7 +4517,7 @@ void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
// of common logic.
static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_set) {
bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
HCondition* condition = cond->AsCondition();
DataType::Type cond_type =
@@ -4660,7 +4660,7 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) {
Location dst = locations->Out();
Location false_src = locations->InAt(0);
Location true_src = locations->InAt(1);
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
GpuRegister cond_reg = TMP;
FpuRegister fcond_reg = FTMP;
DataType::Type cond_type = DataType::Type::kInt32;
@@ -4668,7 +4668,7 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) {
DataType::Type dst_type = select->GetType();
if (IsBooleanValueOrMaterializedCondition(cond)) {
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<GpuRegister>();
+ cond_reg = locations->InAt(/* at= */ 2).AsRegister<GpuRegister>();
} else {
HCondition* condition = cond->AsCondition();
LocationSummary* cond_locations = cond->GetLocations();
@@ -4677,13 +4677,13 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) {
switch (cond_type) {
default:
cond_inverted = MaterializeIntLongCompare(if_cond,
- /* is64bit */ false,
+ /* is64bit= */ false,
cond_locations,
cond_reg);
break;
case DataType::Type::kInt64:
cond_inverted = MaterializeIntLongCompare(if_cond,
- /* is64bit */ true,
+ /* is64bit= */ true,
cond_locations,
cond_reg);
break;
@@ -4826,14 +4826,14 @@ void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
}
void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) {
- if (CanMoveConditionally(select, /* locations_to_set */ nullptr)) {
+ if (CanMoveConditionally(select, /* locations_to_set= */ nullptr)) {
GenConditionalMove(select);
} else {
LocationSummary* locations = select->GetLocations();
Mips64Label false_target;
GenerateTestAndBranch(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
+ /* condition_input_index= */ 2,
+ /* true_target= */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
@@ -4945,7 +4945,7 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
obj,
offset,
temp_loc,
- /* needs_null_check */ true);
+ /* needs_null_check= */ true);
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5101,7 +5101,7 @@ void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadOneRegister(
out_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -5142,7 +5142,7 @@ void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadTwoRegisters(
obj_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -5230,7 +5230,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instr
__ Daui(base, obj, offset_high);
}
Mips64Label skip_call;
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
if (label_low != nullptr) {
DCHECK(short_offset);
__ Bind(label_low);
@@ -5360,7 +5360,7 @@ void CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* in
GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
Mips64Label skip_call;
if (short_offset) {
- __ Beqzc(T9, &skip_call, /* is_bare */ true);
+ __ Beqzc(T9, &skip_call, /* is_bare= */ true);
__ Nop(); // In forbidden slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
@@ -5369,7 +5369,7 @@ void CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* in
} else {
int16_t offset_low = Low16Bits(offset);
int16_t offset_high = High16Bits(offset - offset_low); // Accounts for sign extension in lwu.
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Daui(TMP, obj, offset_high); // In delay slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
@@ -5442,12 +5442,12 @@ void CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* in
// We will not do the explicit null check in the thunk as some form of a null check
// must've been done earlier.
DCHECK(!needs_null_check);
- const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+ const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
// Loading the entrypoint does not require a load acquire since it is only changed when
// threads are suspended or running a checkpoint.
__ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
Mips64Label skip_call;
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
GpuRegister index_reg = index.AsRegister<GpuRegister>();
__ Dlsa(TMP, index_reg, obj, scale_factor); // In delay slot.
@@ -5558,7 +5558,7 @@ void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
ref,
obj,
- /* field_offset */ index,
+ /* field_offset= */ index,
temp_reg);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
@@ -5821,7 +5821,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ Bnec(out, cls.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -5850,7 +5850,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
break;
@@ -6059,7 +6059,7 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
// On MIPS64 we support all dispatch types.
return desired_dispatch_info;
}
@@ -6092,7 +6092,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -6101,7 +6101,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
- __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -6110,7 +6110,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
PcRelativePatchInfo* info_low = NewMethodBssEntryPatch(
MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ __ Ld(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -6280,7 +6280,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(out, AT, /* placeholder */ 0x5678);
+ __ Daddiu(out, AT, /* imm16= */ 0x5678);
break;
}
case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -6291,7 +6291,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ __ Lwu(out, AT, /* imm16= */ 0x5678);
break;
}
case HLoadClass::LoadKind::kBssEntry: {
@@ -6303,7 +6303,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
GenerateGcRootFieldLoad(cls,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
read_barrier_option,
&info_low->label);
generate_null_check = true;
@@ -6427,7 +6427,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(out, AT, /* placeholder */ 0x5678);
+ __ Daddiu(out, AT, /* imm16= */ 0x5678);
return;
}
case HLoadString::LoadKind::kBootImageRelRo: {
@@ -6438,7 +6438,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ __ Lwu(out, AT, /* imm16= */ 0x5678);
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -6451,7 +6451,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
GenerateGcRootFieldLoad(load,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
@@ -6633,10 +6633,8 @@ void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes care
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index ddc154d40f..52f3a62f33 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -541,7 +541,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 43169ba7eb..5a18c1f72b 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -216,7 +216,7 @@ void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Addv(dst.S(), src.V4S());
break;
@@ -230,7 +230,7 @@ void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) {
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Addp(dst.D(), src.V2D());
break;
@@ -1277,6 +1277,74 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins
}
}
+void LocationsBuilderARM64::VisitVecDotProd(HVecDotProd* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ DCHECK(instruction->GetPackedType() == DataType::Type::kInt32);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+
+ // For Int8 and Uint8 we need a temp register.
+ if (DataType::Size(instruction->InputAt(1)->AsVecOperation()->GetPackedType()) == 1) {
+ locations->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitVecDotProd(HVecDotProd* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ VRegister acc = VRegisterFrom(locations->InAt(0));
+ VRegister left = VRegisterFrom(locations->InAt(1));
+ VRegister right = VRegisterFrom(locations->InAt(2));
+ HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
+ HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
+ DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
+ HVecOperation::ToSignedType(b->GetPackedType()));
+ DCHECK_EQ(instruction->GetPackedType(), DataType::Type::kInt32);
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+
+ size_t inputs_data_size = DataType::Size(a->GetPackedType());
+ switch (inputs_data_size) {
+ case 1u: {
+ DCHECK_EQ(16u, a->GetVectorLength());
+ VRegister tmp = VRegisterFrom(locations->GetTemp(0));
+ if (instruction->IsZeroExtending()) {
+ // TODO: Use Armv8.4-A UDOT instruction when it is available.
+ __ Umull(tmp.V8H(), left.V8B(), right.V8B());
+ __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+
+ __ Umull2(tmp.V8H(), left.V16B(), right.V16B());
+ __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+ } else {
+ // TODO: Use Armv8.4-A SDOT instruction when it is available.
+ __ Smull(tmp.V8H(), left.V8B(), right.V8B());
+ __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+
+ __ Smull2(tmp.V8H(), left.V16B(), right.V16B());
+ __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+ }
+ break;
+ }
+ case 2u:
+ DCHECK_EQ(8u, a->GetVectorLength());
+ if (instruction->IsZeroExtending()) {
+ __ Umlal(acc.V4S(), left.V4H(), right.V4H());
+ __ Umlal2(acc.V4S(), left.V8H(), right.V8H());
+ } else {
+ __ Smlal(acc.V4S(), left.V4H(), right.V4H());
+ __ Smlal2(acc.V4S(), left.V8H(), right.V8H());
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type size: " << inputs_data_size;
+ }
+}
+
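// [Editor's sketch — not part of the patch.] Scalar semantics of the
// widen-and-accumulate emulation above (real UDOT/SDOT remain a TODO).
// With n = 16 bytes or 8 halfwords per input vector:
//
//   for (size_t i = 0; i < n; ++i) {
//     acc[i % 4] += widen(a[i]) * widen(b[i]);  // widen = zext or sext
//   }
//
// A native DOT instruction would accumulate adjacent groups (acc[i / 4])
// instead; the lane routing differs, but the total across the four
// accumulator lanes is identical, which should be all the subsequent
// reduction consumes.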
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 7b66b17983..b092961a56 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -138,7 +138,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Vpadd(DataTypeValue::I32, dst, src, src);
break;
@@ -854,6 +854,14 @@ void InstructionCodeGeneratorARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* i
}
}
+void LocationsBuilderARMVIXL::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Return whether the vector memory access operation is guaranteed to be word-aligned (ARM word
// size equals to 4).
static bool IsWordAligned(HVecMemoryOperation* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index df0e1485d6..4e9ba0d3d2 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -74,19 +74,19 @@ void InstructionCodeGeneratorMIPS::VisitVecReplicateScalar(HVecReplicateScalar*
__ InsertW(static_cast<VectorRegister>(FTMP),
locations->InAt(0).AsRegisterPairHigh<Register>(),
1);
- __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double */ true);
+ __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double= */ true);
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FRegister>(),
- /* is_double */ false);
+ /* is_double= */ false);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FRegister>(),
- /* is_double */ true);
+ /* is_double= */ true);
break;
default:
LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -187,7 +187,7 @@ void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Hadd_sD(tmp, src, src);
__ IlvlD(dst, tmp, tmp);
@@ -209,7 +209,7 @@ void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ IlvlD(dst, src, src);
__ AddvD(dst, dst, src);
@@ -1274,6 +1274,14 @@ void InstructionCodeGeneratorMIPS::VisitVecSADAccumulate(HVecSADAccumulate* inst
}
}
+void LocationsBuilderMIPS::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
@@ -1336,7 +1344,7 @@ int32_t InstructionCodeGeneratorMIPS::VecAddress(LocationSummary* locations,
}
void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
}
void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
@@ -1379,7 +1387,7 @@ void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
}
void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index de354b63a1..6467d3e27f 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -79,13 +79,13 @@ void InstructionCodeGeneratorMIPS64::VisitVecReplicateScalar(HVecReplicateScalar
DCHECK_EQ(4u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FpuRegister>(),
- /* is_double */ false);
+ /* is_double= */ false);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FpuRegister>(),
- /* is_double */ true);
+ /* is_double= */ true);
break;
default:
LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -185,7 +185,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Hadd_sD(tmp, src, src);
__ IlvlD(dst, tmp, tmp);
@@ -207,7 +207,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ IlvlD(dst, src, src);
__ AddvD(dst, dst, src);
@@ -1272,6 +1272,14 @@ void InstructionCodeGeneratorMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* in
}
}
+void LocationsBuilderMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
@@ -1334,7 +1342,7 @@ int32_t InstructionCodeGeneratorMIPS64::VecAddress(LocationSummary* locations,
}
void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
}
void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
@@ -1377,7 +1385,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
}
void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 2502275b3a..0ee00356b9 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -205,8 +205,8 @@ void LocationsBuilderX86::VisitVecReduce(HVecReduce* instruction) {
CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
- instruction->GetKind() == HVecReduce::kMin ||
- instruction->GetKind() == HVecReduce::kMax) {
+ instruction->GetReductionKind() == HVecReduce::kMin ||
+ instruction->GetReductionKind() == HVecReduce::kMax) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
}
}
@@ -218,38 +218,23 @@ void InstructionCodeGeneratorX86::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ movaps(dst, src);
__ phaddd(dst, dst);
__ phaddd(dst, dst);
break;
- case HVecReduce::kMin: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- __ movaps(tmp, src);
- __ movaps(dst, src);
- __ psrldq(tmp, Immediate(8));
- __ pminsd(dst, tmp);
- __ psrldq(tmp, Immediate(4));
- __ pminsd(dst, tmp);
- break;
- }
- case HVecReduce::kMax: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- __ movaps(tmp, src);
- __ movaps(dst, src);
- __ psrldq(tmp, Immediate(8));
- __ pmaxsd(dst, tmp);
- __ psrldq(tmp, Immediate(4));
- __ pmaxsd(dst, tmp);
- break;
- }
+ case HVecReduce::kMin:
+ case HVecReduce::kMax:
+ // Historical note: the implementation here used to be broken (b/117863065).
+ // Do not reuse that old code if MIN/MAX reduction is ever brought back.
+ LOG(FATAL) << "Unsupported reduction type.";
}
break;
case DataType::Type::kInt64: {
DCHECK_EQ(2u, instruction->GetVectorLength());
XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ movaps(tmp, src);
__ movaps(dst, src);
@@ -1143,6 +1128,14 @@ void InstructionCodeGeneratorX86::VisitVecSADAccumulate(HVecSADAccumulate* instr
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderX86::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 4a67dafd8a..9c2882766c 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -188,8 +188,8 @@ void LocationsBuilderX86_64::VisitVecReduce(HVecReduce* instruction) {
CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
- instruction->GetKind() == HVecReduce::kMin ||
- instruction->GetKind() == HVecReduce::kMax) {
+ instruction->GetReductionKind() == HVecReduce::kMin ||
+ instruction->GetReductionKind() == HVecReduce::kMax) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
}
}
@@ -201,38 +201,23 @@ void InstructionCodeGeneratorX86_64::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ movaps(dst, src);
__ phaddd(dst, dst);
__ phaddd(dst, dst);
break;
- case HVecReduce::kMin: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- __ movaps(tmp, src);
- __ movaps(dst, src);
- __ psrldq(tmp, Immediate(8));
- __ pminsd(dst, tmp);
- __ psrldq(tmp, Immediate(4));
- __ pminsd(dst, tmp);
- break;
- }
- case HVecReduce::kMax: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- __ movaps(tmp, src);
- __ movaps(dst, src);
- __ psrldq(tmp, Immediate(8));
- __ pmaxsd(dst, tmp);
- __ psrldq(tmp, Immediate(4));
- __ pmaxsd(dst, tmp);
- break;
- }
+ case HVecReduce::kMin:
+ case HVecReduce::kMax:
+ // Historical note: the implementation here used to be broken (b/117863065).
+ // Do not reuse that old code if MIN/MAX reduction is ever brought back.
+ LOG(FATAL) << "Unsupported reduction type.";
}
break;
case DataType::Type::kInt64: {
DCHECK_EQ(2u, instruction->GetVectorLength());
XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ movaps(tmp, src);
__ movaps(dst, src);
@@ -1116,6 +1101,14 @@ void InstructionCodeGeneratorX86_64::VisitVecSADAccumulate(HVecSADAccumulate* in
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderX86_64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6a27081dab..766ff78fa4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1228,7 +1228,7 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type ty
case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
- break;
+ UNREACHABLE();
}
return Location::NoLocation();
}
@@ -1720,7 +1720,7 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1738,9 +1738,9 @@ void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
GenerateTestAndBranch<Label>(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1863,7 +1863,7 @@ void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) {
} else {
NearLabel false_target;
GenerateTestAndBranch<NearLabel>(
- select, /* condition_input_index */ 2, /* true_target */ nullptr, &false_target);
+ select, /* condition_input_index= */ 2, /* true_target= */ nullptr, &false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
}
@@ -2989,7 +2989,7 @@ void LocationsBuilderX86::VisitAdd(HAdd* add) {
default:
LOG(FATAL) << "Unexpected add type " << add->GetResultType();
- break;
+ UNREACHABLE();
}
}
@@ -3434,8 +3434,8 @@ void InstructionCodeGeneratorX86::GenerateRemFP(HRem *rem) {
// Load the values to the FP stack in reverse order, using temporaries if needed.
const bool is_wide = !is_float;
- PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp */ true, is_wide);
- PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp */ true, is_wide);
+ PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp= */ true, is_wide);
+ PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp= */ true, is_wide);
// Loop doing FPREM until we stabilize.
NearLabel retry;
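// [Editor's note — not part of the patch.] x87 FPREM produces only a partial
// remainder: it must be re-issued until the C2 flag in the FPU status word
// clears, hence the `retry` loop set up here.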
@@ -3497,6 +3497,27 @@ void InstructionCodeGeneratorX86::DivRemOneOrMinusOne(HBinaryOperation* instruct
}
}
+void InstructionCodeGeneratorX86::RemByPowerOfTwo(HRem* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+
+ Register out = locations->Out().AsRegister<Register>();
+ Register numerator = locations->InAt(0).AsRegister<Register>();
+
+ int32_t imm = Int64FromConstant(second.GetConstant());
+ DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+ uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
+
+ Register tmp = locations->GetTemp(0).AsRegister<Register>();
+ NearLabel done;
+ __ movl(out, numerator);
+ __ andl(out, Immediate(abs_imm - 1));
+ __ j(Condition::kZero, &done);
+ __ leal(tmp, Address(out, static_cast<int32_t>(~(abs_imm - 1))));
+ __ testl(numerator, numerator);
+ __ cmovl(Condition::kLess, out, tmp);
+ __ Bind(&done);
+}
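// [Editor's sketch — not part of the patch; names illustrative.] Scalar
// equivalent of the RemByPowerOfTwo sequence above, for d = abs_imm a power
// of two (Java's % takes the sign of the dividend):
//
//   int32_t RemPowerOfTwo(int32_t n, int32_t d) {
//     int32_t r = n & (d - 1);  // movl + andl
//     if (r != 0 && n < 0) {    // j(kZero) skips the fix-up; testl + cmovl
//       r += ~(d - 1);          // leal adds ~(d - 1), i.e. subtracts d
//     }
//     return r;
//   }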
void InstructionCodeGeneratorX86::DivByPowerOfTwo(HDiv* instruction) {
LocationSummary* locations = instruction->GetLocations();
@@ -3551,7 +3572,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation
int64_t magic;
int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
// Save the numerator.
__ movl(num, eax);
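// [Editor's sketch — not part of the patch.] CalculateMagicAndShiftForDivRem
// implements the classic Granlund-Montgomery scheme: pick (magic, shift) so
// that, up to divisor- and sign-dependent fix-ups,
//
//   n / d  ==  (int32_t)(((int64_t)magic * n) >> 32) >> shift
//
// turning the division into a widening multiply plus shifts.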
@@ -3610,8 +3631,12 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
// Do not generate anything for 0. DivZeroCheck would forbid any generated code.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (is_div && IsPowerOfTwo(AbsOrMin(imm))) {
- DivByPowerOfTwo(instruction->AsDiv());
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ if (is_div) {
+ DivByPowerOfTwo(instruction->AsDiv());
+ } else {
+ RemByPowerOfTwo(instruction->AsRem());
+ }
} else {
DCHECK(imm <= -2 || imm >= 2);
GenerateDivRemWithAnyConstant(instruction);
@@ -4525,10 +4550,8 @@ void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
@@ -4778,14 +4801,14 @@ void CodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
}
case MemBarrierKind::kNTStoreStore:
// Non-Temporal Store/Store needs an explicit fence.
- MemoryFence(/* non-temporal */ true);
+ MemoryFence(/* non-temporal= */ true);
break;
}
}
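// [Editor's note — not part of the patch.] Why kNTStoreStore needs a real
// fence: non-temporal stores (MOVNT*) are weakly ordered even under x86-TSO,
// so a StoreStore edge after them cannot be left implicit the way it can for
// ordinary stores.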
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -4913,14 +4936,14 @@ void CodeGeneratorX86::GenerateVirtualCall(
void CodeGeneratorX86::RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t intrinsic_data) {
boot_image_intrinsic_patches_.emplace_back(
- method_address, /* target_dex_file */ nullptr, intrinsic_data);
+ method_address, /* target_dex_file= */ nullptr, intrinsic_data);
__ Bind(&boot_image_intrinsic_patches_.back().label);
}
void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t boot_image_offset) {
boot_image_method_patches_.emplace_back(
- method_address, /* target_dex_file */ nullptr, boot_image_offset);
+ method_address, /* target_dex_file= */ nullptr, boot_image_offset);
__ Bind(&boot_image_method_patches_.back().label);
}
@@ -4988,7 +5011,7 @@ void CodeGeneratorX86::LoadBootImageAddress(Register reg,
invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
__ leal(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
RecordBootImageIntrinsicPatch(method_address, boot_image_reference);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
HX86ComputeBaseMethodAddress* method_address =
invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
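// [Editor's note — not part of the patch.] 32-bit x86 has no PC-relative data
// addressing, so these paths lean on HX86ComputeBaseMethodAddress (a call+pop
// materialized base) plus kDummy32BitOffset, a placeholder displacement that
// is patched with the real offset once the boot image layout is known.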
@@ -5214,7 +5237,7 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, base, offset, /* needs_null_check */ true);
+ instruction, out, base, offset, /* needs_null_check= */ true);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5697,7 +5720,7 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+ instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
} else {
Register out = out_loc.AsRegister<Register>();
__ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -6559,7 +6582,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
cls,
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
- /* fixup_label */ nullptr,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
@@ -7086,7 +7109,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
}
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -7118,7 +7141,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -7426,6 +7449,61 @@ void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instr
}
}
+void LocationsBuilderX86::VisitX86AndNot(HX86AndNot* instruction) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+ DCHECK(DataType::IsIntOrLongType(instruction->GetType())) << instruction->GetType();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86::VisitX86AndNot(HX86AndNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location dest = locations->Out();
+ if (instruction->GetResultType() == DataType::Type::kInt32) {
+ __ andn(dest.AsRegister<Register>(),
+ first.AsRegister<Register>(),
+ second.AsRegister<Register>());
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
+ __ andn(dest.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ second.AsRegisterPairLow<Register>());
+ __ andn(dest.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ second.AsRegisterPairHigh<Register>());
+ }
+}
+
+void LocationsBuilderX86::VisitX86MaskOrResetLeastSetBit(HX86MaskOrResetLeastSetBit* instruction) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+ DCHECK(instruction->GetType() == DataType::Type::kInt32) << instruction->GetType();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86::VisitX86MaskOrResetLeastSetBit(
+ HX86MaskOrResetLeastSetBit* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location src = locations->InAt(0);
+ Location dest = locations->Out();
+ DCHECK(instruction->GetResultType() == DataType::Type::kInt32);
+ switch (instruction->GetOpKind()) {
+ case HInstruction::kAnd:
+ __ blsr(dest.AsRegister<Register>(), src.AsRegister<Register>());
+ break;
+ case HInstruction::kXor:
+ __ blsmsk(dest.AsRegister<Register>(), src.AsRegister<Register>());
+ break;
+ default:
+ LOG(FATAL) << "Unreachable";
+ }
+}
+
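// [Editor's sketch — not part of the patch; assumes the assembler keeps the
// hardware operand order.] Semantics of the BMI instructions selected above:
//
//   andn(dst, a, b);    // dst = ~a & b             (HX86AndNot)
//   blsr(dst, src);     // dst = src & (src - 1)    reset lowest set bit (kAnd)
//   blsmsk(dst, src);   // dst = src ^ (src - 1)    mask up to lowest set bit (kXor)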
void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderX86::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
@@ -7572,7 +7650,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, out_reg, offset, /* needs_null_check */ false);
+ instruction, out, out_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -7606,7 +7684,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, obj_reg, offset, /* needs_null_check */ false);
+ instruction, out, obj_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7655,7 +7733,7 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
- instruction, root, /* unpoison_ref_before_marking */ false);
+ instruction, root, /* unpoison_ref_before_marking= */ false);
codegen_->AddSlowPath(slow_path);
// Test the entrypoint (`Thread::Current()->pReadBarrierMarkReg ## root.reg()`).
@@ -7785,10 +7863,10 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i
if (always_update_field) {
DCHECK(temp != nullptr);
slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
- instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
+ instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
- instruction, ref, /* unpoison_ref_before_marking */ true);
+ instruction, ref, /* unpoison_ref_before_marking= */ true);
}
AddSlowPath(slow_path);
@@ -8301,7 +8379,7 @@ void CodeGeneratorX86::PatchJitRootUse(uint8_t* code,
uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
uintptr_t address =
reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
dchecked_integral_cast<uint32_t>(address);
}
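// [Editor's note — not part of the patch.] The aligned(1) alias is what makes
// this store well-defined: `code + code_offset` need not be 4-byte aligned,
// and the attribute tells the compiler to emit an unaligned store. The `using`
// spelling is the modern equivalent of the old typedef.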
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 615477171b..deeef888e2 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -159,6 +159,7 @@ class LocationsBuilderX86 : public HGraphVisitor {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -190,6 +191,7 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -216,6 +218,7 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivByPowerOfTwo(HDiv* instruction);
+ void RemByPowerOfTwo(HRem* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateRemFP(HRem* rem);
void HandleCondition(HCondition* condition);
@@ -410,7 +413,7 @@ class CodeGeneratorX86 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 489652b85b..67a2aa561b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -978,7 +978,7 @@ inline Condition X86_64FPCondition(IfCondition cond) {
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -992,7 +992,7 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
// temp = thread->string_init_entrypoint
uint32_t offset =
GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip */ true));
+ __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
@@ -1001,19 +1001,19 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
DCHECK(GetCompilerOptions().IsBootImage());
__ leal(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageMethodPatch(invoke);
break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
__ movl(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageRelRoPatch(GetBootImageOffset(invoke));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
__ movq(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
RecordMethodBssEntryPatch(invoke);
break;
}
@@ -1076,12 +1076,12 @@ void CodeGeneratorX86_64::GenerateVirtualCall(
}
void CodeGeneratorX86_64::RecordBootImageIntrinsicPatch(uint32_t intrinsic_data) {
- boot_image_intrinsic_patches_.emplace_back(/* target_dex_file */ nullptr, intrinsic_data);
+ boot_image_intrinsic_patches_.emplace_back(/* target_dex_file= */ nullptr, intrinsic_data);
__ Bind(&boot_image_intrinsic_patches_.back().label);
}
void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset) {
- boot_image_method_patches_.emplace_back(/* target_dex_file */ nullptr, boot_image_offset);
+ boot_image_method_patches_.emplace_back(/* target_dex_file= */ nullptr, boot_image_offset);
__ Bind(&boot_image_method_patches_.back().label);
}
@@ -1123,10 +1123,10 @@ Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference) {
if (GetCompilerOptions().IsBootImage()) {
- __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageIntrinsicPatch(boot_image_reference);
- } else if (Runtime::Current()->IsAotCompiler()) {
- __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ } else if (GetCompilerOptions().GetCompilePic()) {
+ __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageRelRoPatch(boot_image_reference);
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
@@ -1146,7 +1146,7 @@ void CodeGeneratorX86_64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* in
DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
// Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
__ leal(argument,
- Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
MethodReference target_method = invoke->GetTargetMethod();
dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
boot_image_type_patches_.emplace_back(target_method.dex_file, type_idx.index_);
@@ -1277,7 +1277,7 @@ void CodeGeneratorX86_64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_poin
}
void CodeGeneratorX86_64::GenerateInvokeRuntime(int32_t entry_point_offset) {
- __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true));
+ __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip= */ true));
}
static constexpr int kNumberOfCpuRegisterPairs = 0;
@@ -1799,7 +1799,7 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1817,9 +1817,9 @@ void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86_64>(deoptimize);
GenerateTestAndBranch<Label>(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1922,8 +1922,8 @@ void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) {
} else {
NearLabel false_target;
GenerateTestAndBranch<NearLabel>(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
+ /* condition_input_index= */ 2,
+ /* true_target= */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
@@ -2425,7 +2425,7 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(DataType::Type
case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
- break;
+ UNREACHABLE();
}
return Location::NoLocation();
}
@@ -3560,7 +3560,40 @@ void InstructionCodeGeneratorX86_64::DivRemOneOrMinusOne(HBinaryOperation* instr
LOG(FATAL) << "Unexpected type for div by (-)1 " << instruction->GetResultType();
}
}
+void InstructionCodeGeneratorX86_64::RemByPowerOfTwo(HRem* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister numerator = locations->InAt(0).AsRegister<CpuRegister>();
+ int64_t imm = Int64FromConstant(second.GetConstant());
+ DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+ uint64_t abs_imm = AbsOrMin(imm);
+ CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ if (instruction->GetResultType() == DataType::Type::kInt32) {
+ NearLabel done;
+ __ movl(out, numerator);
+ __ andl(out, Immediate(abs_imm - 1));
+ __ j(Condition::kZero, &done);
+ __ leal(tmp, Address(out, static_cast<int32_t>(~(abs_imm - 1))));
+ __ testl(numerator, numerator);
+ __ cmov(Condition::kLess, out, tmp, /* is64bit= */ false);
+ __ Bind(&done);
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
+ codegen_->Load64BitValue(tmp, abs_imm - 1);
+ NearLabel done;
+ __ movq(out, numerator);
+ __ andq(out, tmp);
+ __ j(Condition::kZero, &done);
+ __ movq(tmp, numerator);
+ __ sarq(tmp, Immediate(63));
+ __ shlq(tmp, Immediate(WhichPowerOf2(abs_imm)));
+ __ orq(out, tmp);
+ __ Bind(&done);
+ }
+}
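For readers tracing the new RemByPowerOfTwo above: it computes a truncated (C/Java-style) remainder without a division. A minimal scalar C++ sketch of the same idiom, assuming a positive power-of-two divisor (not ART code):

    #include <cstdint>

    int32_t RemPow2(int32_t n, int32_t d) {  // assumes d == 1 << k, d > 0
      uint32_t mask = static_cast<uint32_t>(d) - 1;
      int32_t r = n & static_cast<int32_t>(mask);  // low k bits of n
      if (r != 0 && n < 0) {
        r |= ~static_cast<int32_t>(mask);  // sign-extend: equals r - d
      }
      return r;  // e.g. RemPow2(-7, 4) == -3, matching Java's -7 % 4
    }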
void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
LocationSummary* locations = instruction->GetLocations();
Location second = locations->InAt(1);
@@ -3575,9 +3608,17 @@ void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
if (instruction->GetResultType() == DataType::Type::kInt32) {
- __ leal(tmp, Address(numerator, abs_imm - 1));
- __ testl(numerator, numerator);
- __ cmov(kGreaterEqual, tmp, numerator);
+ // When the denominator is 2, the rounding bias is just the numerator's sign bit,
+ // so shr+addl replaces test+cmov and saves a cycle (scalar sketch after this hunk).
+ if (abs_imm == 2) {
+ __ leal(tmp, Address(numerator, 0));
+ __ shrl(tmp, Immediate(31));
+ __ addl(tmp, numerator);
+ } else {
+ __ leal(tmp, Address(numerator, abs_imm - 1));
+ __ testl(numerator, numerator);
+ __ cmov(kGreaterEqual, tmp, numerator);
+ }
int shift = CTZ(imm);
__ sarl(tmp, Immediate(shift));
@@ -3589,11 +3630,16 @@ void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
} else {
DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
CpuRegister rdx = locations->GetTemp(0).AsRegister<CpuRegister>();
-
- codegen_->Load64BitValue(rdx, abs_imm - 1);
- __ addq(rdx, numerator);
- __ testq(numerator, numerator);
- __ cmov(kGreaterEqual, rdx, numerator);
+ if (abs_imm == 2) {
+ __ movq(rdx, numerator);
+ __ shrq(rdx, Immediate(63));
+ __ addq(rdx, numerator);
+ } else {
+ codegen_->Load64BitValue(rdx, abs_imm - 1);
+ __ addq(rdx, numerator);
+ __ testq(numerator, numerator);
+ __ cmov(kGreaterEqual, rdx, numerator);
+ }
int shift = CTZ(imm);
__ sarq(rdx, Immediate(shift));
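The abs_imm == 2 special case above biases the numerator by its own sign bit before the arithmetic shift. In scalar C++ terms (a sketch, not ART code), truncated division by 2 is:

    #include <cstdint>

    int32_t Div2(int32_t n) {
      int32_t sign = static_cast<int32_t>(static_cast<uint32_t>(n) >> 31);  // 1 iff n < 0
      // Bias negative values so the arithmetic right shift (what sarl emits)
      // rounds toward zero: Div2(-7) == -3, Div2(7) == 3.
      return (n + sign) >> 1;
    }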
@@ -3633,7 +3679,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat
if (instruction->GetResultType() == DataType::Type::kInt32) {
int imm = second.GetConstant()->AsIntConstant()->GetValue();
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
__ movl(numerator, eax);
@@ -3670,7 +3716,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat
CpuRegister rax = eax;
CpuRegister rdx = edx;
- CalculateMagicAndShiftForDivRem(imm, true /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ true, &magic, &shift);
// Save the numerator.
__ movq(numerator, rax);
@@ -3737,8 +3783,12 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in
// Do not generate anything. DivZeroCheck would prevent any code to be executed.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (instruction->IsDiv() && IsPowerOfTwo(AbsOrMin(imm))) {
- DivByPowerOfTwo(instruction->AsDiv());
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ if (is_div) {
+ DivByPowerOfTwo(instruction->AsDiv());
+ } else {
+ RemByPowerOfTwo(instruction->AsRem());
+ }
} else {
DCHECK(imm <= -2 || imm >= 2);
GenerateDivRemWithAnyConstant(instruction);
@@ -4371,10 +4421,8 @@ void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
@@ -4506,7 +4554,7 @@ void CodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) {
}
case MemBarrierKind::kNTStoreStore:
// Non-Temporal Store/Store needs an explicit fence (see the sketch after this hunk).
- MemoryFence(/* non-temporal */ true);
+ MemoryFence(/* non-temporal= */ true);
break;
}
}
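The kNTStoreStore case is the only store/store barrier that must emit code even on x86's strong memory model, because non-temporal stores are weakly ordered. A sketch of the user-level pattern with standard SSE intrinsics (not ART code; real concurrent code would use atomics for the flag):

    #include <immintrin.h>

    void PublishNonTemporal(int* data, volatile int* ready_flag) {
      _mm_stream_si32(data, 42);  // non-temporal store: bypasses the cache, weakly ordered
      _mm_sfence();               // orders the streaming store before the flag store
      *ready_flag = 1;            // illustrative publication of the data
    }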
@@ -4583,7 +4631,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, base, offset, /* needs_null_check */ true);
+ instruction, out, base, offset, /* needs_null_check= */ true);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5038,7 +5086,7 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+ instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
} else {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
__ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -5438,7 +5486,7 @@ void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
}
// Load the address of the card table into `card`.
__ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true));
+ /* no_rip= */ true));
// Calculate the offset (in the card table) of the card corresponding to
// `object`.
__ movq(temp, object);
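For context on the card-marking sequence above: each fixed-size region of the heap maps to one byte of the card table, so dirtying a card is a shift plus a one-byte store. A sketch with assumed constants (the real shift and dirty value live in ART's CardTable, not in this diff):

    #include <cstdint>

    constexpr uintptr_t kCardShift = 10;  // assumption: 1 KiB of heap per card

    void MarkCard(uint8_t* card_table_base, uintptr_t object, uint8_t dirty_value) {
      card_table_base[object >> kCardShift] = dirty_value;  // dirty the card for `object`
    }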
@@ -5518,7 +5566,7 @@ void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruc
}
__ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true),
+ /* no_rip= */ true),
Immediate(0));
if (successor == nullptr) {
__ j(kNotEqual, slow_path->GetEntryLabel());
@@ -5900,25 +5948,25 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
cls,
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
- /* fixup_label */ nullptr,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageTypePatch(cls);
break;
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
break;
}
case HLoadClass::LoadKind::kBssEntry: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ false);
+ /* no_rip= */ false);
Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
@@ -5934,7 +5982,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
}
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ true);
+ /* no_rip= */ true);
Label* fixup_label =
codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
@@ -6059,19 +6107,19 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageStringPatch(load);
return;
}
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
return;
}
case HLoadString::LoadKind::kBssEntry: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ false);
+ /* no_rip= */ false);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::String> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
@@ -6090,7 +6138,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
}
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ true);
+ /* no_rip= */ true);
Label* fixup_label = codegen_->NewJitRootStringPatch(
load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
@@ -6112,7 +6160,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
static Address GetExceptionTlsAddress() {
return Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true);
+ /* no_rip= */ true);
}
void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
@@ -6387,7 +6435,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
}
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6419,7 +6467,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6736,6 +6784,48 @@ void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* in
}
}
+void LocationsBuilderX86_64::VisitX86AndNot(HX86AndNot* instruction) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+ DCHECK(DataType::IsIntOrLongType(instruction->GetType())) << instruction->GetType();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ // There is no immediate variant of negated bitwise AND (andn) on X86; see the
+ // sketch after these visitors.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void LocationsBuilderX86_64::VisitX86MaskOrResetLeastSetBit(HX86MaskOrResetLeastSetBit* instruction) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+ DCHECK(DataType::IsIntOrLongType(instruction->GetType())) << instruction->GetType();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86_64::VisitX86AndNot(HX86AndNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location dest = locations->Out();
+ __ andn(dest.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+}
+
+void InstructionCodeGeneratorX86_64::VisitX86MaskOrResetLeastSetBit(HX86MaskOrResetLeastSetBit* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location src = locations->InAt(0);
+ Location dest = locations->Out();
+ switch (instruction->GetOpKind()) {
+ case HInstruction::kAnd:
+ __ blsr(dest.AsRegister<CpuRegister>(), src.AsRegister<CpuRegister>());
+ break;
+ case HInstruction::kXor:
+ __ blsmsk(dest.AsRegister<CpuRegister>(), src.AsRegister<CpuRegister>());
+ break;
+ default:
+ LOG(FATAL) << "Unreachable";
+ }
+}
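The three instructions emitted by the new visitors (andn, blsr, blsmsk) are BMI bit-manipulation ops with no immediate forms. Their scalar meaning, as a C++ sketch (not ART code):

    #include <cstdint>

    uint32_t AndNot(uint32_t a, uint32_t b) { return ~a & b; }  // andn dst, a, b
    uint32_t Blsr(uint32_t x) { return x & (x - 1); }           // reset lowest set bit
    uint32_t Blsmsk(uint32_t x) { return x ^ (x - 1); }         // mask through lowest set bit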
+
void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderX86_64::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
@@ -6864,7 +6954,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, out_reg, offset, /* needs_null_check */ false);
+ instruction, out, out_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -6898,7 +6988,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, obj_reg, offset, /* needs_null_check */ false);
+ instruction, out, obj_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -6947,13 +7037,13 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
- instruction, root, /* unpoison_ref_before_marking */ false);
+ instruction, root, /* unpoison_ref_before_marking= */ false);
codegen_->AddSlowPath(slow_path);
// Test the `Thread::Current()->pReadBarrierMarkReg ## root.reg()` entrypoint.
const int32_t entry_point_offset =
Thread::ReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(root.reg());
- __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip */ true), Immediate(0));
+ __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip= */ true), Immediate(0));
// The entrypoint is null when the GC is not marking.
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -7079,10 +7169,10 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
DCHECK(temp1 != nullptr);
DCHECK(temp2 != nullptr);
slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
- instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
+ instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp1, *temp2);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
- instruction, ref, /* unpoison_ref_before_marking */ true);
+ instruction, ref, /* unpoison_ref_before_marking= */ true);
}
AddSlowPath(slow_path);
@@ -7542,7 +7632,7 @@ void CodeGeneratorX86_64::PatchJitRootUse(uint8_t* code,
uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
uintptr_t address =
reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
dchecked_integral_cast<uint32_t>(address);
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index f77a5c84b4..f74e130702 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -161,6 +161,7 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -192,6 +193,7 @@ class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -213,6 +215,7 @@ class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
void GenerateRemFP(HRem* rem);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivByPowerOfTwo(HDiv* instruction);
+ void RemByPowerOfTwo(HRem* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleCondition(HCondition* condition);
@@ -409,7 +412,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index d6c97552dc..f406983fc2 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -180,7 +180,7 @@ static HInstruction* FindIdealPosition(HInstruction* instruction,
DCHECK(!instruction->IsPhi()); // Makes no sense for Phi.
// Find the target block.
- CommonDominator finder(/* start_block */ nullptr);
+ CommonDominator finder(/* block= */ nullptr);
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) {
@@ -259,12 +259,12 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
size_t number_of_instructions = graph_->GetCurrentInstructionId();
ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
- ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+ ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable= */ false);
processed_instructions.ClearAllBits();
- ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+ ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
post_dominated.ClearAllBits();
ArenaBitVector instructions_that_can_move(
- &allocator, number_of_instructions, /* expandable */ false);
+ &allocator, number_of_instructions, /* expandable= */ false);
instructions_that_can_move.ClearAllBits();
ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
@@ -414,7 +414,7 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
}
// Find the position of the instruction we're storing into, filtering out this
// store and all other stores to that instruction.
- position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true);
+ position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter= */ true);
// The position needs to be dominated by the store, in order for the store to move there.
if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) {
@@ -434,7 +434,7 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
continue;
}
MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSunk);
- instruction->MoveBefore(position, /* ensure_safety */ false);
+ instruction->MoveBefore(position, /* do_checks= */ false);
}
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index f186191a0f..b5a7c137f6 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -823,6 +823,33 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
InternalCodeAllocator code_allocator;
codegen.Finalize(&code_allocator);
}
+
+// Check that ART ISA features are propagated to VIXL for arm64 (using cortex-a75 as an example).
+TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA75) {
+ OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a75");
+ HGraph* graph = CreateGraph();
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
+
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kDotProduct));
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kFPHalf));
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kAtomics));
+}
+
+// Check that ART ISA features are propagated to VIXL for arm64 (using cortex-a53 as an example).
+TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA53) {
+ OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a53");
+ HGraph* graph = CreateGraph();
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
+
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
+ EXPECT_FALSE(features->Has(vixl::CPUFeatures::kDotProduct));
+ EXPECT_FALSE(features->Has(vixl::CPUFeatures::kFPHalf));
+ EXPECT_FALSE(features->Has(vixl::CPUFeatures::kAtomics));
+}
+
#endif
#ifdef ART_ENABLE_CODEGEN_mips
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index b1436f863c..74d9d3a993 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -70,7 +70,7 @@ class ConstantFoldingTest : public OptimizingUnitTest {
check_after_cf(graph_);
- HDeadCodeElimination(graph_, nullptr /* stats */, "dead_code_elimination").Run();
+ HDeadCodeElimination(graph_, /* stats= */ nullptr, "dead_code_elimination").Run();
GraphChecker graph_checker_dce(graph_);
graph_checker_dce.Run();
ASSERT_TRUE(graph_checker_dce.IsValid());
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 3cb8bf2f47..3a1a9e023d 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -78,7 +78,7 @@ class CFREVisitor : public HGraphVisitor {
VisitSetLocation(instruction, value);
}
- void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) {
+ void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) override {
// Pessimize: Merge all fences.
MergeCandidateFences();
}
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 5ac6e46003..3cbcc9e0c3 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -231,6 +231,21 @@ class DataType {
}
}
+ static Type ToUnsigned(Type type) {
+ switch (type) {
+ case Type::kInt8:
+ return Type::kUint8;
+ case Type::kInt16:
+ return Type::kUint16;
+ case Type::kInt32:
+ return Type::kUint32;
+ case Type::kInt64:
+ return Type::kUint64;
+ default:
+ return type;
+ }
+ }
+
static const char* PrettyDescriptor(Type type);
private:
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 277453545a..f5cd4dc27a 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -43,7 +43,7 @@ void DeadCodeEliminationTest::TestCode(const std::vector<uint16_t>& data,
std::string actual_before = printer_before.str();
ASSERT_EQ(actual_before, expected_before);
- HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run();
+ HDeadCodeElimination(graph, /* stats= */ nullptr, "dead_code_elimination").Run();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index a689f35e0f..01d9603802 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -635,8 +635,8 @@ void GraphChecker::HandleTypeCheckInstruction(HTypeCheckInstruction* check) {
}
}
CheckTypeCheckBitstringInput(
- check, /* input_pos */ 2, check_values, expected_path_to_root, "path_to_root");
- CheckTypeCheckBitstringInput(check, /* input_pos */ 3, check_values, expected_mask, "mask");
+ check, /* input_pos= */ 2, check_values, expected_path_to_root, "path_to_root");
+ CheckTypeCheckBitstringInput(check, /* input_pos= */ 3, check_values, expected_mask, "mask");
} else {
if (!input->IsLoadClass()) {
AddError(StringPrintf("%s:%d (classic) expects a HLoadClass as second input, not %s:%d.",
@@ -931,7 +931,7 @@ void GraphChecker::VisitPhi(HPhi* phi) {
// because the BitVector reallocation strategy has very bad worst-case behavior.
ArenaBitVector visited(&allocator,
GetGraph()->GetCurrentInstructionId(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocGraphChecker);
visited.ClearAllBits();
if (!IsConstantEquivalent(phi, other_phi, &visited)) {
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 31db8c205f..2a7bbcb72f 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -106,8 +106,7 @@ std::ostream& operator<<(std::ostream& os, const StringList& list) {
}
}
-typedef Disassembler* create_disasm_prototype(InstructionSet instruction_set,
- DisassemblerOptions* options);
+using create_disasm_prototype = Disassembler*(InstructionSet, DisassemblerOptions*);
class HGraphVisualizerDisassembler {
public:
HGraphVisualizerDisassembler(InstructionSet instruction_set,
@@ -131,10 +130,10 @@ class HGraphVisualizerDisassembler {
// been generated, so we can read data in literal pools.
disassembler_ = std::unique_ptr<Disassembler>((*create_disassembler)(
instruction_set,
- new DisassemblerOptions(/* absolute_addresses */ false,
+ new DisassemblerOptions(/* absolute_addresses= */ false,
base_address,
end_address,
- /* can_read_literals */ true,
+ /* can_read_literals= */ true,
Is64BitInstructionSet(instruction_set)
? &Thread::DumpThreadOffset<PointerSize::k64>
: &Thread::DumpThreadOffset<PointerSize::k32>)));
@@ -394,7 +393,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void VisitLoadMethodType(HLoadMethodType* load_method_type) override {
StartAttributeStream("load_kind") << "RuntimeCall";
const DexFile& dex_file = load_method_type->GetDexFile();
- const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
+ const dex::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id);
}
@@ -564,6 +563,14 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("kind") << instruction->GetOpKind();
}
+ void VisitVecDotProd(HVecDotProd* instruction) override {
+ VisitVecOperation(instruction);
+ DataType::Type arg_type = instruction->InputAt(1)->AsVecOperation()->GetPackedType();
+ StartAttributeStream("type") << (instruction->IsZeroExtending() ?
+ DataType::ToUnsigned(arg_type) :
+ DataType::ToSigned(arg_type));
+ }
+
#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) override {
StartAttributeStream("kind") << instruction->GetOpKind();
@@ -917,8 +924,8 @@ void HGraphVisualizer::DumpGraphWithDisassembly() const {
HGraphVisualizerPrinter printer(graph_,
*output_,
"disassembly",
- /* is_after_pass */ true,
- /* graph_in_bad_state */ false,
+ /* is_after_pass= */ true,
+ /* graph_in_bad_state= */ false,
codegen_,
codegen_.GetDisassemblyInformation());
printer.Run();
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index e6b6326726..3689d1d232 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -348,7 +348,7 @@ class GlobalValueNumberer : public ValueObject {
side_effects_(side_effects),
sets_(graph->GetBlocks().size(), nullptr, allocator_.Adapter(kArenaAllocGvn)),
visited_blocks_(
- &allocator_, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) {
+ &allocator_, graph->GetBlocks().size(), /* expandable= */ false, kArenaAllocGvn) {
visited_blocks_.ClearAllBits();
}
@@ -546,12 +546,12 @@ HBasicBlock* GlobalValueNumberer::FindVisitedBlockWithRecyclableSet(
// that is larger, we return it if no perfectly-matching set is found.
// Note that we defer testing WillBeReferencedAgain until all other criteria
// have been satisfied because it might be expensive.
- if (current_set->CanHoldCopyOf(reference_set, /* exact_match */ true)) {
+ if (current_set->CanHoldCopyOf(reference_set, /* exact_match= */ true)) {
if (!WillBeReferencedAgain(current_block)) {
return current_block;
}
} else if (secondary_match == nullptr &&
- current_set->CanHoldCopyOf(reference_set, /* exact_match */ false)) {
+ current_set->CanHoldCopyOf(reference_set, /* exact_match= */ false)) {
if (!WillBeReferencedAgain(current_block)) {
secondary_match = current_block;
}
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index a4d638f4c6..3a10d5831d 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -1074,8 +1074,8 @@ bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
&& lower_value >= upper_value;
default:
LOG(FATAL) << "CONDITION UNREACHABLE";
+ UNREACHABLE();
}
- return false; // not certain, may be untaken
}
bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
@@ -1099,8 +1099,8 @@ bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
return (IsAtLeast(upper_expr, &value) && value >= (min - stride_value));
default:
LOG(FATAL) << "CONDITION UNREACHABLE";
+ UNREACHABLE();
}
- return false; // not certain, may be infinite
}
bool HInductionVarAnalysis::FitsNarrowerControl(InductionInfo* lower_expr,
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 55eca2316a..4c78fa8f06 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -216,13 +216,13 @@ bool InductionVarRange::GetInductionRange(HInstruction* context,
chase_hint_ = chase_hint;
bool in_body = context->GetBlock() != loop->GetHeader();
int64_t stride_value = 0;
- *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
- *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false), chase_hint);
+ *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
+ *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min= */ false), chase_hint);
*needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip);
chase_hint_ = nullptr;
// Retry chasing constants for wrap-around (merge sensitive).
if (!min_val->is_known && info->induction_class == HInductionVarAnalysis::kWrapAround) {
- *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
+ *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
}
return true;
}
@@ -445,8 +445,8 @@ bool InductionVarRange::IsConstant(HInductionVarAnalysis::InductionInfo* info,
}
// Try range analysis on the invariant, only accept a proper range
// to avoid arithmetic wrap-around anomalies.
- Value min_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ true);
- Value max_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ false);
+ Value min_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ true);
+ Value max_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ false);
if (IsConstantValue(min_val) &&
IsConstantValue(max_val) && min_val.b_constant <= max_val.b_constant) {
if ((request == kExact && min_val.b_constant == max_val.b_constant) || request == kAtMost) {
@@ -791,10 +791,10 @@ InductionVarRange::Value InductionVarRange::GetMul(HInductionVarAnalysis::Induct
return MulRangeAndConstant(value, info1, trip, in_body, is_min);
}
// Interval ranges.
- Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
- Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
- Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
- Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+ Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+ Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+ Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+ Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
// Positive range vs. positive or negative range.
if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -825,10 +825,10 @@ InductionVarRange::Value InductionVarRange::GetDiv(HInductionVarAnalysis::Induct
return DivRangeAndConstant(value, info1, trip, in_body, is_min);
}
// Interval ranges.
- Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
- Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
- Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
- Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+ Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+ Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+ Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+ Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
// Positive range vs. positive or negative range.
if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -1019,10 +1019,10 @@ bool InductionVarRange::GenerateRangeOrLastValue(HInstruction* context,
// Code generation for the taken test: generate the code when requested; otherwise
// just analyze whether code generation would be feasible when a taken test is needed.
if (taken_test != nullptr) {
- return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min */ false);
+ return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min= */ false);
} else if (*needs_taken_test) {
if (!GenerateCode(
- trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min */ false)) {
+ trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min= */ false)) {
return false;
}
}
@@ -1030,9 +1030,9 @@ bool InductionVarRange::GenerateRangeOrLastValue(HInstruction* context,
return
// Success on lower if invariant (not set), or code can be generated.
((info->induction_class == HInductionVarAnalysis::kInvariant) ||
- GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) &&
+ GenerateCode(info, trip, graph, block, lower, in_body, /* is_min= */ true)) &&
// And success on upper.
- GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
+ GenerateCode(info, trip, graph, block, upper, in_body, /* is_min= */ false);
}
bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::InductionInfo* info,
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index e5bc6ef22c..f6af384af0 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -252,24 +252,24 @@ class InductionVarRangeTest : public OptimizingUnitTest {
Value GetMin(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip) {
- return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ true);
+ return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ true);
}
Value GetMax(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip) {
- return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ false);
+ return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ false);
}
Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
bool is_min) {
- return range_.GetMul(info1, info2, nullptr, /* in_body */ true, is_min);
+ return range_.GetMul(info1, info2, nullptr, /* in_body= */ true, is_min);
}
Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
bool is_min) {
- return range_.GetDiv(info1, info2, nullptr, /* in_body */ true, is_min);
+ return range_.GetDiv(info1, info2, nullptr, /* in_body= */ true, is_min);
}
Value GetRem(HInductionVarAnalysis::InductionInfo* info1,
@@ -701,7 +701,11 @@ TEST_F(InductionVarRangeTest, MaxValue) {
TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (GetAllocator()) HNewArray(x_, x_, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(
+ /* cls= */ x_,
+ /* length= */ x_,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
entry_block_->AddInstruction(new_array);
HInstruction* array_length = new (GetAllocator()) HArrayLength(new_array, 0);
entry_block_->AddInstruction(array_length);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3ba741472e..96d6d2a1ae 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -18,6 +18,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
+#include "base/logging.h"
#include "builder.h"
#include "class_linker.h"
#include "class_root.h"
@@ -27,7 +28,6 @@
#include "dex/inline_method_analyser.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
-#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "instruction_simplifier.h"
@@ -36,8 +36,9 @@
#include "jit/jit_code_cache.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
+#include "mirror/object_array-alloc-inl.h"
+#include "mirror/object_array-inl.h"
#include "nodes.h"
-#include "optimizing_compiler.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "scoped_thread_state_change-inl.h"
@@ -149,13 +150,13 @@ bool HInliner::Run() {
// If we're compiling with a core image (which is only used for
// test purposes), honor inlining directives in method names:
- // - if a method's name contains the substring "$inline$", ensure
- // that this method is actually inlined;
// - if a method's name contains the substring "$noinline$", do not
- // inline that method.
+ // inline that method;
+ // - if a method's name contains the substring "$inline$", ensure
+ // that this method is actually inlined.
// We limit the latter to AOT compilation, as the JIT may or may not inline
// depending on the state of classes at runtime.
- const bool honor_noinline_directives = IsCompilingWithCoreImage();
+ const bool honor_noinline_directives = codegen_->GetCompilerOptions().CompilingWithCoreImage();
const bool honor_inline_directives =
honor_noinline_directives && Runtime::Current()->IsAotCompiler();
@@ -174,7 +175,7 @@ bool HInliner::Run() {
if (honor_noinline_directives) {
// Debugging case: directives in method names control or assert on inlining.
std::string callee_name = outer_compilation_unit_.GetDexFile()->PrettyMethod(
- call->GetDexMethodIndex(), /* with_signature */ false);
+ call->GetDexMethodIndex(), /* with_signature= */ false);
// Tests prevent inlining by having $noinline$ in their method names.
if (callee_name.find("$noinline$") == std::string::npos) {
if (TryInline(call)) {
@@ -406,7 +407,7 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
return single_impl;
}
-static bool IsMethodUnverified(CompilerDriver* const compiler_driver, ArtMethod* method)
+static bool IsMethodUnverified(const CompilerOptions& compiler_options, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (!method->GetDeclaringClass()->IsVerified()) {
if (Runtime::Current()->UseJitCompilation()) {
@@ -415,8 +416,9 @@ static bool IsMethodUnverified(CompilerDriver* const compiler_driver, ArtMethod*
return true;
}
uint16_t class_def_idx = method->GetDeclaringClass()->GetDexClassDefIndex();
- if (!compiler_driver->IsMethodVerifiedWithoutFailures(
- method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
+ if (!compiler_options.IsMethodVerifiedWithoutFailures(method->GetDexMethodIndex(),
+ class_def_idx,
+ *method->GetDexFile())) {
// Method has soft or hard failures, don't analyze.
return true;
}
@@ -424,11 +426,11 @@ static bool IsMethodUnverified(CompilerDriver* const compiler_driver, ArtMethod*
return false;
}
-static bool AlwaysThrows(CompilerDriver* const compiler_driver, ArtMethod* method)
+static bool AlwaysThrows(const CompilerOptions& compiler_options, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method != nullptr);
// Skip non-compilable and unverified methods.
- if (!method->IsCompilable() || IsMethodUnverified(compiler_driver, method)) {
+ if (!method->IsCompilable() || IsMethodUnverified(compiler_options, method)) {
return false;
}
// Skip native methods, methods with try blocks, and methods that are too large.
@@ -502,7 +504,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
bool result = TryInlineAndReplace(invoke_instruction,
actual_method,
ReferenceTypeInfo::CreateInvalid(),
- /* do_rtp */ true,
+ /* do_rtp= */ true,
cha_devirtualize);
if (result) {
// Successfully inlined.
@@ -516,7 +518,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
}
}
- } else if (!cha_devirtualize && AlwaysThrows(compiler_driver_, actual_method)) {
+ } else if (!cha_devirtualize && AlwaysThrows(codegen_->GetCompilerOptions(), actual_method)) {
// Set the always-throws property for a non-inlined method call with a single target
// (unless it was obtained through CHA, because that would imply we have
// to add the CHA dependency, which seems not worth it).
@@ -678,7 +680,7 @@ HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
/*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Runtime::Current()->IsAotCompiler());
- const ProfileCompilationInfo* pci = compiler_driver_->GetProfileCompilationInfo();
+ const ProfileCompilationInfo* pci = codegen_->GetCompilerOptions().GetProfileCompilationInfo();
if (pci == nullptr) {
return kInlineCacheNoData;
}
@@ -856,9 +858,9 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
if (!TryInlineAndReplace(invoke_instruction,
resolved_method,
- ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
- /* do_rtp */ false,
- /* cha_devirtualize */ false)) {
+ ReferenceTypeInfo::Create(monomorphic_type, /* is_exact= */ true),
+ /* do_rtp= */ false,
+ /* cha_devirtualize= */ false)) {
return false;
}
@@ -869,7 +871,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
class_index,
monomorphic_type,
invoke_instruction,
- /* with_deoptimization */ true);
+ /* with_deoptimization= */ true);
// Run type propagation to get the guard typed, and eventually propagate the
// type of the receiver.
@@ -877,7 +879,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp_fixup.Run();
MaybeRecordStat(stats_, MethodCompilationStat::kInlinedMonomorphicCall);
@@ -947,7 +949,7 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
klass,
is_referrer,
invoke_instruction->GetDexPc(),
- /* needs_access_check */ false);
+ /* needs_access_check= */ false);
HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
load_class, codegen_, caller_compilation_unit_);
DCHECK(kind != HLoadClass::LoadKind::kInvalid)
@@ -1025,7 +1027,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction,
method,
- ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+ ReferenceTypeInfo::Create(handle, /* is_exact= */ true),
&return_replacement)) {
all_targets_inlined = false;
} else {
@@ -1077,7 +1079,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp_fixup.Run();
return true;
}
@@ -1148,14 +1150,14 @@ void HInliner::CreateDiamondPatternForPolymorphicInline(HInstruction* compare,
graph_->UpdateLoopAndTryInformationOfNewBlock(
- then, original_invoke_block, /* replace_if_back_edge */ false);
+ then, original_invoke_block, /* replace_if_back_edge= */ false);
graph_->UpdateLoopAndTryInformationOfNewBlock(
- otherwise, original_invoke_block, /* replace_if_back_edge */ false);
+ otherwise, original_invoke_block, /* replace_if_back_edge= */ false);
// In case the original invoke location was a back edge, we need to update
// the loop to now have the merge block as a back edge.
graph_->UpdateLoopAndTryInformationOfNewBlock(
- merge, original_invoke_block, /* replace_if_back_edge */ true);
+ merge, original_invoke_block, /* replace_if_back_edge= */ true);
}
bool HInliner::TryInlinePolymorphicCallToSameTarget(
@@ -1273,7 +1275,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp_fixup.Run();
MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall);
@@ -1296,9 +1298,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
// If invoke_instruction is devirtualized to a different method, give intrinsics
// another chance before we try to inline it.
- bool wrong_invoke_type = false;
- if (invoke_instruction->GetResolvedMethod() != method &&
- IntrinsicsRecognizer::Recognize(invoke_instruction, method, &wrong_invoke_type)) {
+ if (invoke_instruction->GetResolvedMethod() != method && method->IsIntrinsic()) {
MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
if (invoke_instruction->IsInvokeInterface()) {
// We don't intrinsify an invoke-interface directly.
@@ -1311,6 +1311,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
invoke_instruction->GetDexMethodIndex(), // Use interface method's dex method index.
method,
method->GetMethodIndex());
+ DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
HInputsRef inputs = invoke_instruction->GetInputs();
for (size_t index = 0; index != inputs.size(); ++index) {
new_invoke->SetArgumentAt(index, inputs[index]);
@@ -1320,14 +1321,11 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
if (invoke_instruction->GetType() == DataType::Type::kReference) {
new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
}
- // Run intrinsic recognizer again to set new_invoke's intrinsic.
- IntrinsicsRecognizer::Recognize(new_invoke, method, &wrong_invoke_type);
- DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
return_replacement = new_invoke;
// invoke_instruction is replaced with new_invoke.
should_remove_invoke_instruction = true;
} else {
- // invoke_instruction is intrinsified and stays.
+ invoke_instruction->SetResolvedMethod(method);
}
} else if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
if (invoke_instruction->IsInvokeInterface()) {
@@ -1401,7 +1399,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false).Run();
+ /* is_first_run= */ false).Run();
}
return true;
}
@@ -1421,10 +1419,6 @@ size_t HInliner::CountRecursiveCallsOf(ArtMethod* method) const {
static inline bool MayInline(const CompilerOptions& compiler_options,
const DexFile& inlined_from,
const DexFile& inlined_into) {
- if (kIsTargetBuild) {
- return true;
- }
-
// We're not allowed to inline across dex files when the inlined-from dex file is in the no-inline-from list.
if (!IsSameDexFile(inlined_from, inlined_into) &&
ContainsElement(compiler_options.GetNoInlineFromDexFile(), &inlined_from)) {
@@ -1506,7 +1500,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
return false;
}
- if (IsMethodUnverified(compiler_driver_, method)) {
+ if (IsMethodUnverified(codegen_->GetCompilerOptions(), method)) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
<< "Method " << method->PrettyMethod()
<< " couldn't be verified, so it cannot be inlined";
@@ -1631,7 +1625,8 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
[](uint16_t index) { return index != DexFile::kDexNoIndex16; }));
// Create HInstanceFieldSet for each IPUT that stores non-zero data.
- HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u);
+ HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction,
+ /* arg_vreg_index= */ 0u);
bool needs_constructor_barrier = false;
for (size_t i = 0; i != number_of_iputs; ++i) {
HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
@@ -1649,7 +1644,7 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
}
}
if (needs_constructor_barrier) {
- // See CompilerDriver::RequiresConstructorBarrier for more details.
+ // See DexCompilationUnit::RequiresConstructorBarrier for more details.
DCHECK(obj != nullptr) << "only non-static methods can have a constructor fence";
HConstructorFence* constructor_fence =
@@ -1673,7 +1668,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index,
REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtField* resolved_field =
- class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+ class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet(
obj,
@@ -1686,7 +1681,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index,
*referrer->GetDexFile(),
// Read barrier generates a runtime call in the slow path, and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
- /* dex_pc */ 0);
+ /* dex_pc= */ 0);
if (iget->GetType() == DataType::Type::kReference) {
// Use the same dex_cache that we used for field lookup as the hint_dex_cache.
Handle<mirror::DexCache> dex_cache = handles_->NewHandle(referrer->GetDexCache());
@@ -1694,7 +1689,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index,
outer_compilation_unit_.GetClassLoader(),
dex_cache,
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp.Visit(iget);
}
return iget;
@@ -1708,7 +1703,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index,
REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtField* resolved_field =
- class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+ class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
DCHECK(resolved_field != nullptr);
if (is_final != nullptr) {
// This information is needed only for constructors.
@@ -1727,7 +1722,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index,
*referrer->GetDexFile(),
// Read barrier generates a runtime call in the slow path, and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
- /* dex_pc */ 0);
+ /* dex_pc= */ 0);
return iput;
}
@@ -1739,6 +1734,21 @@ static inline Handle<T> NewHandleIfDifferent(T* object,
return (object != hint.Get()) ? handles->NewHandle(object) : hint;
}
+static bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!Runtime::Current()->IsAotCompiler()) {
+ // JIT can always encode methods in stack maps.
+ return true;
+ }
+ if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
+ return true;
+ }
+ // TODO(ngeoffray): Support more AOT cases for inlining:
+ // - methods in multidex
+ // - methods in boot image for on-device non-PIC compilation.
+ return false;
+}
+
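// Gating sketch (illustrative, assuming caller-side names): the inliner can
// bail out early when AOT cannot describe the callee in the caller's stack map:
//   if (!CanEncodeInlinedMethodInStackMap(caller_dex_file, method)) {
//     return false;  // JIT always passes; AOT currently needs the same dex file.
//   }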
bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
ReferenceTypeInfo receiver_type,
@@ -1746,7 +1756,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
HInstruction** return_replacement) {
DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
ScopedObjectAccess soa(Thread::Current());
- const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
+ const dex::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
uint32_t method_index = resolved_method->GetDexMethodIndex();
CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
@@ -1759,6 +1769,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
caller_compilation_unit_.GetClassLoader(),
handles_);
+ Handle<mirror::Class> compiling_class = handles_->NewHandle(resolved_method->GetDeclaringClass());
DexCompilationUnit dex_compilation_unit(
class_loader,
class_linker,
@@ -1767,8 +1778,9 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
method_index,
resolved_method->GetAccessFlags(),
- /* verified_method */ nullptr,
- dex_cache);
+ /* verified_method= */ nullptr,
+ dex_cache,
+ compiling_class);
InvokeType invoke_type = invoke_instruction->GetInvokeType();
if (invoke_type == kInterface) {
@@ -1777,6 +1789,14 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
invoke_type = kVirtual;
}
+ bool caller_dead_reference_safe = graph_->IsDeadReferenceSafe();
+ const dex::ClassDef& callee_class = resolved_method->GetClassDef();
+ // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
+ // is rarely true, so the expensive check is usually short-circuited.
+ bool callee_dead_reference_safe =
+ annotations::HasDeadReferenceSafeAnnotation(callee_dex_file, callee_class)
+ && !annotations::MethodContainsRSensitiveAccess(callee_dex_file, callee_class, method_index);
+
const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
graph_->GetAllocator(),
@@ -1785,8 +1805,9 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
method_index,
codegen_->GetCompilerOptions().GetInstructionSet(),
invoke_type,
+ callee_dead_reference_safe,
graph_->IsDebuggable(),
- /* osr */ false,
+ /* osr= */ false,
caller_instruction_counter);
callee_graph->SetArtMethod(resolved_method);
@@ -1807,7 +1828,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
code_item_accessor,
&dex_compilation_unit,
&outer_compilation_unit_,
- compiler_driver_,
codegen_,
inline_stats_,
resolved_method->GetQuickenedInfo(),
@@ -1868,7 +1888,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
outer_compilation_unit_.GetClassLoader(),
dex_compilation_unit.GetDexCache(),
handles_,
- /* is_first_run */ false).Run();
+ /* is_first_run= */ false).Run();
}
RunOptimizations(callee_graph, code_item, dex_compilation_unit);
@@ -2012,23 +2032,26 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
inline_stats_->AddTo(stats_);
}
+ if (caller_dead_reference_safe && !callee_dead_reference_safe) {
+ // Caller was dead reference safe, but is not anymore, since we inlined dead
+ // reference unsafe code. Prior transformations remain valid, since they did not
+ // affect the inlined code.
+ graph_->MarkDeadReferenceUnsafe();
+ }
+
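// In effect, dead-reference safety propagates as a conjunction across inlining;
// a sketch of the invariant maintained by the code above:
//   graph_safe_after_inline = graph_safe_before_inline && callee_dead_reference_safe;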
return true;
}
void HInliner::RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit) {
// Note: if the outermost_graph_ is being compiled for OSR, we should not run any
// optimization that could lead to an HDeoptimize. The following optimizations do not.
HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
HConstantFolding fold(callee_graph, "constant_folding$inliner");
- HSharpening sharpening(callee_graph, codegen_);
InstructionSimplifier simplify(callee_graph, codegen_, inline_stats_);
- IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_);
HOptimization* optimizations[] = {
- &intrinsics,
- &sharpening,
&simplify,
&fold,
&dce,
@@ -2063,7 +2086,6 @@ void HInliner::RunOptimizations(HGraph* callee_graph,
codegen_,
outer_compilation_unit_,
dex_compilation_unit,
- compiler_driver_,
handles_,
inline_stats_,
total_number_of_dex_registers_ + accessor.RegistersSize(),
@@ -2097,7 +2119,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
// is more specific than the class which declares the method.
if (!resolved_method->IsStatic()) {
if (IsReferenceTypeRefinement(GetClassRTI(resolved_method->GetDeclaringClass()),
- /* declared_can_be_null */ false,
+ /* declared_can_be_null= */ false,
invoke_instruction->InputAt(0u))) {
return true;
}
@@ -2106,7 +2128,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
// Iterate over the list of parameter types and test whether any of the
// actual inputs has a more specific reference type than the type declared in
// the signature.
- const DexFile::TypeList* param_list = resolved_method->GetParameterTypeList();
+ const dex::TypeList* param_list = resolved_method->GetParameterTypeList();
for (size_t param_idx = 0,
input_idx = resolved_method->IsStatic() ? 0 : 1,
e = (param_list == nullptr ? 0 : param_list->Size());
@@ -2117,7 +2139,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
ObjPtr<mirror::Class> param_cls = resolved_method->LookupResolvedClassFromTypeIndex(
param_list->GetTypeItem(param_idx).type_idx_);
if (IsReferenceTypeRefinement(GetClassRTI(param_cls),
- /* declared_can_be_null */ true,
+ /* declared_can_be_null= */ true,
input)) {
return true;
}
@@ -2134,7 +2156,7 @@ bool HInliner::ReturnTypeMoreSpecific(HInvoke* invoke_instruction,
if (return_replacement->GetType() == DataType::Type::kReference) {
// Test if the return type is a refinement of the declared return type.
if (IsReferenceTypeRefinement(invoke_instruction->GetReferenceTypeInfo(),
- /* declared_can_be_null */ true,
+ /* declared_can_be_null= */ true,
return_replacement)) {
return true;
} else if (return_replacement->IsInstanceFieldGet()) {
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 6fd0c204b2..efd4c74079 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -38,7 +38,6 @@ class HInliner : public HOptimization {
CodeGenerator* codegen,
const DexCompilationUnit& outer_compilation_unit,
const DexCompilationUnit& caller_compilation_unit,
- CompilerDriver* compiler_driver,
VariableSizedHandleScope* handles,
OptimizingCompilerStats* stats,
size_t total_number_of_dex_registers,
@@ -51,7 +50,6 @@ class HInliner : public HOptimization {
outer_compilation_unit_(outer_compilation_unit),
caller_compilation_unit_(caller_compilation_unit),
codegen_(codegen),
- compiler_driver_(compiler_driver),
total_number_of_dex_registers_(total_number_of_dex_registers),
total_number_of_instructions_(total_number_of_instructions),
parent_(parent),
@@ -101,7 +99,7 @@ class HInliner : public HOptimization {
// Run simple optimizations on `callee_graph`.
void RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -280,7 +278,6 @@ class HInliner : public HOptimization {
const DexCompilationUnit& outer_compilation_unit_;
const DexCompilationUnit& caller_compilation_unit_;
CodeGenerator* const codegen_;
- CompilerDriver* const compiler_driver_;
const size_t total_number_of_dex_registers_;
size_t total_number_of_instructions_;
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e555d0d890..5e7b57523f 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -19,12 +19,13 @@
#include "art_method-inl.h"
#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
+#include "base/logging.h"
#include "block_builder.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
+#include "code_generator.h"
#include "data_type-inl.h"
#include "dex/bytecode_utils.h"
#include "dex/dex_instruction-inl.h"
-#include "driver/compiler_driver-inl.h"
#include "driver/dex_compilation_unit.h"
#include "driver/compiler_options.h"
#include "imtable-inl.h"
@@ -47,7 +48,6 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
DataType::Type return_type,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
@@ -61,7 +61,6 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
return_type_(return_type),
block_builder_(block_builder),
ssa_builder_(ssa_builder),
- compiler_driver_(compiler_driver),
code_generator_(code_generator),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
@@ -73,7 +72,8 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
current_locals_(nullptr),
latest_result_(nullptr),
current_this_parameter_(nullptr),
- loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
+ loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ class_cache_(std::less<dex::TypeIndex>(), local_allocator->Adapter(kArenaAllocGraphBuilder)) {
loop_headers_.reserve(kDefaultNumberOfLoops);
}
@@ -319,8 +319,8 @@ bool HInstructionBuilder::Build() {
// Find locations where we want to generate extra stackmaps for native debugging.
// This allows us to generate the info only at interesting points (for example,
// at the start of a Java statement) rather than before every dex instruction.
- const bool native_debuggable = compiler_driver_ != nullptr &&
- compiler_driver_->GetCompilerOptions().GetNativeDebuggable();
+ const bool native_debuggable = code_generator_ != nullptr &&
+ code_generator_->GetCompilerOptions().GetNativeDebuggable();
ArenaBitVector* native_debug_info_locations = nullptr;
if (native_debuggable) {
native_debug_info_locations = FindNativeDebugInfoLocations();
@@ -434,7 +434,7 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) {
HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- /* method_load_data */ 0u
+ /* method_load_data= */ 0u
};
InvokeType invoke_type = dex_compilation_unit_->IsStatic() ? kStatic : kDirect;
HInvokeStaticOrDirect* invoke = new (allocator_) HInvokeStaticOrDirect(
@@ -449,7 +449,7 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) {
target_method,
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
RangeInstructionOperands operands(graph_->GetNumberOfVRegs() - in_vregs, in_vregs);
- HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved */ false);
+ HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved= */ false);
// Add the return instruction.
if (return_type_ == DataType::Type::kVoid) {
@@ -466,22 +466,17 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) {
}
ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
- // The callback gets called when the line number changes.
- // In other words, it marks the start of new java statement.
- struct Callback {
- static bool Position(void* ctx, const DexFile::PositionInfo& entry) {
- static_cast<ArenaBitVector*>(ctx)->SetBit(entry.address_);
- return false;
- }
- };
ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
code_item_accessor_.InsnsSizeInCodeUnits(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocGraphBuilder);
locations->ClearAllBits();
- dex_file_->DecodeDebugPositionInfo(code_item_accessor_.DebugInfoOffset(),
- Callback::Position,
- locations);
+ // The visitor gets called when the line number changes.
+ // In other words, it marks the start of a new Java statement.
+ code_item_accessor_.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+ locations->SetBit(entry.address_);
+ return false;
+ });
// Instruction-specific tweaks.
for (const DexInstructionPcPair& inst : code_item_accessor_) {
switch (inst->Opcode()) {
@@ -564,7 +559,7 @@ void HInstructionBuilder::InitializeParameters() {
uint16_t locals_index = graph_->GetNumberOfLocalVRegs();
uint16_t parameter_index = 0;
- const DexFile::MethodId& referrer_method_id =
+ const dex::MethodId& referrer_method_id =
dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
@@ -572,7 +567,7 @@ void HInstructionBuilder::InitializeParameters() {
referrer_method_id.class_idx_,
parameter_index++,
DataType::Type::kReference,
- /* is_this */ true);
+ /* is_this= */ true);
AppendInstruction(parameter);
UpdateLocal(locals_index++, parameter);
number_of_parameters--;
@@ -581,15 +576,15 @@ void HInstructionBuilder::InitializeParameters() {
DCHECK(current_this_parameter_ == nullptr);
}
- const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
- const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
+ const dex::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
+ const dex::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
HParameterValue* parameter = new (allocator_) HParameterValue(
*dex_file_,
arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
parameter_index++,
DataType::FromShorty(shorty[shorty_pos]),
- /* is_this */ false);
+ /* is_this= */ false);
++shorty_pos;
AppendInstruction(parameter);
// Store the parameter value in the local that the dex code will use
@@ -714,20 +709,18 @@ void HInstructionBuilder::Binop_22b(const Instruction& instruction, bool reverse
// Does the method being compiled need any constructor barriers inserted?
// (Always 'false' for methods that aren't <init>.)
-static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, CompilerDriver* driver) {
+static bool RequiresConstructorBarrier(const DexCompilationUnit* cu) {
// Can be null in unit tests only.
if (UNLIKELY(cu == nullptr)) {
return false;
}
- Thread* self = Thread::Current();
- return cu->IsConstructor()
- && !cu->IsStatic()
- // RequiresConstructorBarrier must only be queried for <init> methods;
- // it's effectively "false" for every other method.
- //
- // See CompilerDriver::RequiresConstructBarrier for more explanation.
- && driver->RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
+ // Constructor barriers are applicable only for <init> methods.
+ if (LIKELY(!cu->IsConstructor() || cu->IsStatic())) {
+ return false;
+ }
+
+ return cu->RequiresConstructorBarrier();
}
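// Illustrative outcomes of the predicate above (hypothetical methods):
//   a static <clinit>    -> false (static, so not an instance constructor)
//   String::toString()   -> false (not a constructor at all)
//   an instance <init>   -> whatever cu->RequiresConstructorBarrier() reports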
// Returns true if `block` has only one successor which starts at the next
@@ -773,7 +766,7 @@ void HInstructionBuilder::BuildReturn(const Instruction& instruction,
// Only <init> (which is a return-void) could possibly have a constructor fence.
// This may insert additional redundant constructor fences from the super constructors.
// TODO: remove redundant constructor fences (b/36656456).
- if (RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_)) {
+ if (RequiresConstructorBarrier(dex_compilation_unit_)) {
// Compiling instance constructor.
DCHECK_STREQ("<init>", graph_->GetMethodName());
@@ -787,7 +780,7 @@ void HInstructionBuilder::BuildReturn(const Instruction& instruction,
}
AppendInstruction(new (allocator_) HReturnVoid(dex_pc));
} else {
- DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_));
+ DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_));
HInstruction* value = LoadLocal(instruction.VRegA(), type);
AppendInstruction(new (allocator_) HReturn(value, dex_pc));
}
@@ -854,7 +847,7 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
// make this an invoke-unresolved to handle cross-dex invokes or abstract super methods, both of
// which require runtime handling.
if (invoke_type == kSuper) {
- ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
+ ObjPtr<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass().Get();
if (compiling_class == nullptr) {
// We could not determine the method's class, so we need to wait until runtime.
DCHECK(Runtime::Current()->IsAotCompiler());
@@ -884,8 +877,8 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
// The back-end code generator relies on this check in order to ensure that it will not
// attempt to read the dex_cache with a dex_method_index that is not from the correct
// dex_file. If we didn't do this check then the dex_method_index will not be updated in the
- // builder, which means that the code-generator (and compiler driver during sharpening and
- // inliner, maybe) might invoke an incorrect method.
+ // builder, which means that the code generator (and possibly the sharpening and
+ // inlining passes) might invoke an incorrect method.
// TODO: The actual method could still be referenced in the current dex file, so we
// could try locating it.
// TODO: Remove the dex_file restriction.
@@ -933,7 +926,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
dex_pc,
method_idx,
invoke_type);
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ true);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ true);
}
// Replace calls to String.<init> with StringFactory.
@@ -952,10 +945,10 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect(
allocator_,
number_of_arguments - 1,
- DataType::Type::kReference /*return_type */,
+ /* return_type= */ DataType::Type::kReference,
dex_pc,
method_idx,
- nullptr /* resolved_method */,
+ /* resolved_method= */ nullptr,
dispatch_info,
invoke_type,
target_method,
@@ -974,7 +967,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
if (invoke_type == kStatic) {
clinit_check =
- ProcessClinitCheckForInvoke(soa, dex_pc, resolved_method, &clinit_check_requirement);
+ ProcessClinitCheckForInvoke(dex_pc, resolved_method, &clinit_check_requirement);
} else if (invoke_type == kSuper) {
if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
// Update the method index to the one resolved. Note that this may be a no-op if
@@ -983,11 +976,8 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
}
}
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
- HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- 0u
- };
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info =
+ HSharpening::SharpenInvokeStaticOrDirect(resolved_method, code_generator_);
MethodReference target_method(resolved_method->GetDexFile(),
resolved_method->GetDexMethodIndex());
invoke = new (allocator_) HInvokeStaticOrDirect(allocator_,
@@ -1020,7 +1010,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
resolved_method,
ImTable::GetImtIndex(resolved_method));
}
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false, clinit_check);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false, clinit_check);
}
bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
@@ -1036,7 +1026,7 @@ bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
return_type,
dex_pc,
method_idx);
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
@@ -1052,7 +1042,7 @@ bool HInstructionBuilder::BuildInvokeCustom(uint32_t dex_pc,
call_site_idx,
return_type,
dex_pc);
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
@@ -1063,7 +1053,7 @@ HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, u
HInstruction* cls = load_class;
Handle<mirror::Class> klass = load_class->GetClass();
- if (!IsInitialized(soa, klass)) {
+ if (!IsInitialized(klass)) {
cls = new (allocator_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
@@ -1292,7 +1282,7 @@ static bool HasTrivialInitialization(ObjPtr<mirror::Class> cls,
return true;
}
-bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const {
+bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
if (cls == nullptr) {
return false;
}
@@ -1307,37 +1297,33 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
}
// Assume loaded only if klass is in the boot image. App classes cannot be assumed
// loaded because we don't even know what class loader will be used to load them.
- if (IsInBootImage(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+ if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
return true;
}
}
- // We can avoid the class initialization check for `cls` in static methods in the
- // very same class. Instance methods of the same class can run on an escaped instance
+ // We can avoid the class initialization check for `cls` in static methods and constructors
+ // in the very same class; invoking a static method involves a class initialization check
+ // and so does the instance allocation that must be executed before invoking a constructor.
+ // Other instance methods of the same class can run on an escaped instance
// of an erroneous class. Even a superclass may need to be checked as the subclass
// can be completely initialized while the superclass is initializing and the subclass
// remains initialized when the superclass initializer throws afterwards. b/62478025
// Note: The HClinitCheck+HInvokeStaticOrDirect merging can still apply.
- ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
- bool is_static = (dex_compilation_unit_->GetAccessFlags() & kAccStatic) != 0u;
- if (is_static && outermost_cls == cls.Get()) {
+ auto is_static_method_or_constructor_of_cls = [cls](const DexCompilationUnit& compilation_unit)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return (compilation_unit.GetAccessFlags() & (kAccStatic | kAccConstructor)) != 0u &&
+ compilation_unit.GetCompilingClass().Get() == cls.Get();
+ };
+ if (is_static_method_or_constructor_of_cls(*outer_compilation_unit_) ||
+ // Check also the innermost method. Though excessive copies of ClinitCheck can be
+ // eliminated by GVN, that happens only after the decision whether to inline the
+ // graph or not, and that decision may depend on the presence of the ClinitCheck.
+ // TODO: We should walk over the entire inlined method chain, but we don't pass that
+ // information to the builder.
+ is_static_method_or_constructor_of_cls(*dex_compilation_unit_)) {
return true;
}
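// Why constructors now qualify (illustrative): for
//   class C { static int f; C() { f = 1; } }
// the new-instance executed before any call to C.<init> already triggers the
// class initialization check for C, so the SPUT inside the constructor needs
// no additional HClinitCheck.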
- // Remember if the compiled class is a subclass of `cls`. By the time this is used
- // below the `outermost_cls` may be invalidated by calling ResolveCompilingClass().
- bool is_subclass = IsSubClass(outermost_cls, cls.Get());
- if (dex_compilation_unit_ != outer_compilation_unit_) {
- // Check also the innermost method. Though excessive copies of ClinitCheck can be
- // eliminated by GVN, that happens only after the decision whether to inline the
- // graph or not and that may depend on the presence of the ClinitCheck.
- // TODO: We should walk over the entire inlined method chain, but we don't pass that
- // information to the builder.
- ObjPtr<mirror::Class> innermost_cls = ResolveCompilingClass(soa);
- if (is_static && innermost_cls == cls.Get()) {
- return true;
- }
- is_subclass = is_subclass || IsSubClass(innermost_cls, cls.Get());
- }
// Otherwise, we may be able to avoid the check if `cls` is a superclass of the class of
// a method being compiled here (anywhere in the inlining chain) as the `cls` must have started initializing
@@ -1358,7 +1344,12 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
// TODO: We should walk over the entire inlined methods chain, but we don't pass that
// information to the builder. (We could also check if we're guaranteed a non-null instance
// of `cls` at this location but that's outside the scope of the instruction builder.)
- if (is_subclass && HasTrivialInitialization(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+ bool is_subclass = IsSubClass(outer_compilation_unit_->GetCompilingClass().Get(), cls.Get());
+ if (dex_compilation_unit_ != outer_compilation_unit_) {
+ is_subclass = is_subclass ||
+ IsSubClass(dex_compilation_unit_->GetCompilingClass().Get(), cls.Get());
+ }
+ if (is_subclass && HasTrivialInitialization(cls.Get(), code_generator_->GetCompilerOptions())) {
return true;
}
@@ -1366,22 +1357,20 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
}
HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
- ScopedObjectAccess& soa,
uint32_t dex_pc,
ArtMethod* resolved_method,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
Handle<mirror::Class> klass = handles_->NewHandle(resolved_method->GetDeclaringClass());
HClinitCheck* clinit_check = nullptr;
- if (IsInitialized(soa, klass)) {
+ if (IsInitialized(klass)) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
} else {
- HLoadClass* cls = BuildLoadClass(soa,
- klass->GetDexTypeIndex(),
+ HLoadClass* cls = BuildLoadClass(klass->GetDexTypeIndex(),
klass->GetDexFile(),
klass,
dex_pc,
- /* needs_access_check */ false);
+ /* needs_access_check= */ false);
if (cls != nullptr) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
clinit_check = new (allocator_) HClinitCheck(cls, dex_pc);
@@ -1505,27 +1494,28 @@ bool HInstructionBuilder::HandleStringInit(HInvoke* invoke,
// to be visited once it is clear whether it has remaining uses.
if (arg_this->IsNewInstance()) {
ssa_builder_->AddUninitializedString(arg_this->AsNewInstance());
- // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
- for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
- if ((*current_locals_)[vreg] == arg_this) {
- (*current_locals_)[vreg] = invoke;
- }
- }
} else {
DCHECK(arg_this->IsPhi());
// We can get a phi as input of a String.<init> if there is a loop between the
// allocation and the String.<init> call. As we don't know which other phis might alias
- // with `arg_this`, we keep a record of these phis and will analyze their inputs and
- // uses once the inputs and users are populated (in ssa_builder.cc).
- // Note: we only do this for phis, as it is a somewhat more expensive operation than
- // what we're doing above when the input is the `HNewInstance`.
- ssa_builder_->AddUninitializedStringPhi(arg_this->AsPhi(), invoke);
+ // with `arg_this`, we keep a record of those invocations so we can later replace
+ // the allocation with the invocation.
+ // Add the actual 'this' input so the analysis knows which instruction is the allocation.
+ // The input will be removed during the analysis.
+ invoke->AddInput(arg_this);
+ ssa_builder_->AddUninitializedStringPhi(invoke);
+ }
+ // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
+ for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
+ if ((*current_locals_)[vreg] == arg_this) {
+ (*current_locals_)[vreg] = invoke;
+ }
}
return true;
}
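// Net effect (sketch): whether `arg_this` is the HNewInstance or a phi, every
// vreg that aliased the uninitialized String now refers to the StringFactory
// invoke; for the phi case, the extra input added above lets ssa_builder.cc
// recover the allocation before removing that input again:
//   before: v0 = NewInstance(String); v3 = v0
//   after : v0 = invoke (StringFactory call); v3 = invoke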
static DataType::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
const char* type = dex_file.GetFieldTypeDescriptor(field_id);
return DataType::FromShorty(type[0]);
}
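// Worked example (sketch): for a field of type long the descriptor is "J", so
// type[0] == 'J' and DataType::FromShorty('J') yields DataType::Type::kInt64.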
@@ -1549,7 +1539,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
}
ScopedObjectAccess soa(Thread::Current());
- ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put);
+ ArtField* resolved_field = ResolveField(field_index, /* is_static= */ false, is_put);
// Generate an explicit null check on the reference, unless the field access
// is unresolved. In that case, we rely on the runtime to perform various
@@ -1612,43 +1602,6 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
return true;
}
-static ObjPtr<mirror::Class> ResolveClassFrom(ScopedObjectAccess& soa,
- CompilerDriver* driver,
- const DexCompilationUnit& compilation_unit)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
- Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
-
- return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
-}
-
-ObjPtr<mirror::Class> HInstructionBuilder::ResolveOutermostCompilingClass(
- ScopedObjectAccess& soa) const {
- return ResolveClassFrom(soa, compiler_driver_, *outer_compilation_unit_);
-}
-
-ObjPtr<mirror::Class> HInstructionBuilder::ResolveCompilingClass(ScopedObjectAccess& soa) const {
- return ResolveClassFrom(soa, compiler_driver_, *dex_compilation_unit_);
-}
-
-bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
- soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
- Handle<mirror::Class> outer_class(hs.NewHandle(ResolveOutermostCompilingClass(soa)));
-
- // GetOutermostCompilingClass returns null when the class is unresolved
- // (e.g. if it derives from an unresolved class). This is bogus knowing that
- // we are compiling it.
- // When this happens we cannot establish a direct relation between the current
- // class and the outer class, so we return false.
- // (Note that this is only used for optimizing invokes and field accesses)
- return (cls != nullptr) && (outer_class.Get() == cls.Get());
-}
-
void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put,
@@ -1668,18 +1621,17 @@ void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& in
ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static, bool is_put) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- Handle<mirror::Class> compiling_class(hs.NewHandle(ResolveCompilingClass(soa)));
ArtField* resolved_field = class_linker->ResolveField(field_idx,
dex_compilation_unit_->GetDexCache(),
class_loader,
is_static);
+ DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
if (UNLIKELY(resolved_field == nullptr)) {
- // Clean up any exception left by type resolution.
+ // Clean up any exception left by field resolution.
soa.Self()->ClearException();
return nullptr;
}
@@ -1691,6 +1643,7 @@ ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static,
}
// Check access.
+ Handle<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass();
if (compiling_class == nullptr) {
if (!resolved_field->IsPublic()) {
return nullptr;
@@ -1720,7 +1673,7 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint16_t field_index = instruction.VRegB_21c();
ScopedObjectAccess soa(Thread::Current());
- ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put);
+ ArtField* resolved_field = ResolveField(field_index, /* is_static= */ true, is_put);
if (resolved_field == nullptr) {
MaybeRecordStat(compilation_stats_,
@@ -1733,12 +1686,11 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
DataType::Type field_type = GetFieldAccessType(*dex_file_, field_index);
Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass());
- HLoadClass* constant = BuildLoadClass(soa,
- klass->GetDexTypeIndex(),
+ HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(),
klass->GetDexFile(),
klass,
dex_pc,
- /* needs_access_check */ false);
+ /* needs_access_check= */ false);
if (constant == nullptr) {
// The class cannot be referenced from this compiled code. Generate
@@ -1750,7 +1702,7 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
HInstruction* cls = constant;
- if (!IsInitialized(soa, klass)) {
+ if (!IsInitialized(klass)) {
cls = new (allocator_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
}
@@ -1849,15 +1801,27 @@ void HInstructionBuilder::BuildArrayAccess(const Instruction& instruction,
graph_->SetHasBoundsChecks(true);
}
+HNewArray* HInstructionBuilder::BuildNewArray(uint32_t dex_pc,
+ dex::TypeIndex type_index,
+ HInstruction* length) {
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+
+ const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(type_index));
+ DCHECK_EQ(descriptor[0], '[');
+ size_t component_type_shift = Primitive::ComponentSizeShift(Primitive::GetType(descriptor[1]));
+
+ HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc, component_type_shift);
+ AppendInstruction(new_array);
+ return new_array;
+}
+
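// Worked example (sketch): for "[J" (a long[] array), descriptor[1] == 'J', so
// Primitive::GetType('J') == kPrimLong and ComponentSizeShift(kPrimLong) == 3;
// the new HNewArray operand lets later stages scale indices by 1 << 3 without
// re-deriving the component type.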
HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
const InstructionOperands& operands) {
const size_t number_of_operands = operands.GetNumberOfOperands();
HInstruction* length = graph_->GetIntConstant(number_of_operands, dex_pc);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
- AppendInstruction(object);
+ HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
DCHECK_EQ(descriptor[0], '[') << descriptor;
char primitive = descriptor[1];
@@ -1870,13 +1834,13 @@ HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
for (size_t i = 0; i < number_of_operands; ++i) {
HInstruction* value = LoadLocal(operands.GetOperand(i), type);
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
- HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(new_array, index, value, type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
- latest_result_ = object;
+ latest_result_ = new_array;
- return object;
+ return new_array;
}
template <typename T>
@@ -1979,12 +1943,11 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint3
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::Class> klass = ResolveClass(soa, type_index);
- bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
- return BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
+ bool needs_access_check = LoadClassNeedsAccessCheck(klass);
+ return BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
}
-HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
- dex::TypeIndex type_index,
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
const DexFile& dex_file,
Handle<mirror::Class> klass,
uint32_t dex_pc,
@@ -2001,11 +1964,8 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
}
// Note: `klass` must be from `handles_`.
- bool is_referrers_class = false;
- if (klass != nullptr) {
- ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
- is_referrers_class = (outermost_cls == klass.Get());
- }
+ bool is_referrers_class =
+ (klass != nullptr) && (outer_compilation_unit_->GetCompilingClass().Get() == klass.Get());
HLoadClass* load_class = new (allocator_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
@@ -2031,22 +1991,28 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
Handle<mirror::Class> HInstructionBuilder::ResolveClass(ScopedObjectAccess& soa,
dex::TypeIndex type_index) {
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- ObjPtr<mirror::Class> klass = compiler_driver_->ResolveClass(
- soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_);
- // TODO: Avoid creating excessive handles if the method references the same class repeatedly.
- // (Use a map on the local_allocator_.)
- return handles_->NewHandle(klass);
+ auto it = class_cache_.find(type_index);
+ if (it != class_cache_.end()) {
+ return it->second;
+ }
+
+ ObjPtr<mirror::Class> klass = dex_compilation_unit_->GetClassLinker()->ResolveType(
+ type_index, dex_compilation_unit_->GetDexCache(), dex_compilation_unit_->GetClassLoader());
+ DCHECK_EQ(klass == nullptr, soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException(); // Clean up any exception left by type resolution.
+
+ Handle<mirror::Class> h_klass = handles_->NewHandle(klass);
+ class_cache_.Put(type_index, h_klass);
+ return h_klass;
}
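// Usage sketch (illustrative): repeated references to the same type_index now
// share one cached Handle instead of growing `handles_` on every lookup:
//   Handle<mirror::Class> a = ResolveClass(soa, type_index);
//   Handle<mirror::Class> b = ResolveClass(soa, type_index);  // cache hit
//   DCHECK_EQ(a.Get(), b.Get());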
-bool HInstructionBuilder::LoadClassNeedsAccessCheck(ScopedObjectAccess& soa,
- Handle<mirror::Class> klass) {
+bool HInstructionBuilder::LoadClassNeedsAccessCheck(Handle<mirror::Class> klass) {
if (klass == nullptr) {
return true;
} else if (klass->IsPublic()) {
return false;
} else {
- ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
+ ObjPtr<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass().Get();
return compiling_class == nullptr || !compiling_class->CanAccess(klass.Get());
}
}
@@ -2075,7 +2041,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::Class> klass = ResolveClass(soa, type_index);
- bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
+ bool needs_access_check = LoadClassNeedsAccessCheck(klass);
TypeCheckKind check_kind = HSharpening::ComputeTypeCheckKind(
klass.Get(), code_generator_, needs_access_check);
@@ -2093,7 +2059,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
bitstring_path_to_root = graph_->GetIntConstant(static_cast<int32_t>(path_to_root), dex_pc);
bitstring_mask = graph_->GetIntConstant(static_cast<int32_t>(mask), dex_pc);
} else {
- class_or_null = BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
+ class_or_null = BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
}
DCHECK(class_or_null != nullptr);
@@ -2899,10 +2865,8 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::NEW_ARRAY: {
dex::TypeIndex type_index(instruction.VRegC_22c());
HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+ HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
- HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc);
- AppendInstruction(new_array);
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
BuildConstructorFenceForAllocation(new_array);
break;
@@ -2982,7 +2946,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::IGET_CHAR_QUICK:
case Instruction::IGET_SHORT:
case Instruction::IGET_SHORT_QUICK: {
- if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ false, quicken_index)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ false, quicken_index)) {
return false;
}
break;
@@ -3002,7 +2966,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::IPUT_CHAR_QUICK:
case Instruction::IPUT_SHORT:
case Instruction::IPUT_SHORT_QUICK: {
- if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ true, quicken_index)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ true, quicken_index)) {
return false;
}
break;
@@ -3015,7 +2979,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
- BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ false);
+ BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ false);
break;
}
@@ -3026,7 +2990,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
- BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ true);
+ BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ true);
break;
}
@@ -3179,7 +3143,7 @@ ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType(
ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const {
// TODO: Cache the result in a Handle<mirror::Class>.
- const DexFile::MethodId& method_id =
+ const dex::MethodId& method_id =
dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_);
}
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index af1b86ca6f..d701445946 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -34,7 +34,6 @@ class ArenaBitVector;
class ArtField;
class ArtMethod;
class CodeGenerator;
-class CompilerDriver;
class DexCompilationUnit;
class HBasicBlockBuilder;
class Instruction;
@@ -59,7 +58,6 @@ class HInstructionBuilder : public ValueObject {
DataType::Type return_type,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
@@ -179,6 +177,9 @@ class HInstructionBuilder : public ValueObject {
uint32_t call_site_idx,
const InstructionOperands& operands);
+ // Builds a new array node.
+ HNewArray* BuildNewArray(uint32_t dex_pc, dex::TypeIndex type_index, HInstruction* length);
+
// Builds a new array node and the instructions that fill it.
HNewArray* BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
@@ -219,8 +220,7 @@ class HInstructionBuilder : public ValueObject {
// Builds a `HLoadClass` loading the given `type_index`.
HLoadClass* BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc);
- HLoadClass* BuildLoadClass(ScopedObjectAccess& soa,
- dex::TypeIndex type_index,
+ HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
const DexFile& dex_file,
Handle<mirror::Class> klass,
uint32_t dex_pc,
@@ -230,7 +230,7 @@ class HInstructionBuilder : public ValueObject {
Handle<mirror::Class> ResolveClass(ScopedObjectAccess& soa, dex::TypeIndex type_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool LoadClassNeedsAccessCheck(ScopedObjectAccess& soa, Handle<mirror::Class> klass)
+ bool LoadClassNeedsAccessCheck(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
// Builds a `HLoadMethodHandle` loading the given `method_handle_index`.
@@ -239,17 +239,6 @@ class HInstructionBuilder : public ValueObject {
// Builds a `HLoadMethodType` loading the given `proto_index`.
void BuildLoadMethodType(dex::ProtoIndex proto_index, uint32_t dex_pc);
- // Returns the outer-most compiling method's class.
- ObjPtr<mirror::Class> ResolveOutermostCompilingClass(ScopedObjectAccess& soa) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Returns the class whose method is being compiled.
- ObjPtr<mirror::Class> ResolveCompilingClass(ScopedObjectAccess& soa) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Returns whether `type_index` points to the outer-most compiling method's class.
- bool IsOutermostCompilingClass(dex::TypeIndex type_index) const;
-
void PotentiallySimplifyFakeString(uint16_t original_dex_register,
uint32_t dex_pc,
HInvoke* invoke);
@@ -272,7 +261,6 @@ class HInstructionBuilder : public ValueObject {
void HandleStringInitResult(HInvokeStaticOrDirect* invoke);
HClinitCheck* ProcessClinitCheckForInvoke(
- ScopedObjectAccess& soa,
uint32_t dex_pc,
ArtMethod* method,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
@@ -286,7 +274,7 @@ class HInstructionBuilder : public ValueObject {
void BuildConstructorFenceForAllocation(HInstruction* allocation);
// Return whether the compiler can assume `cls` is initialized.
- bool IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const
+ bool IsInitialized(Handle<mirror::Class> cls) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Try to resolve a method using the class linker. Return null if a method could
@@ -317,8 +305,6 @@ class HInstructionBuilder : public ValueObject {
HBasicBlockBuilder* const block_builder_;
SsaBuilder* const ssa_builder_;
- CompilerDriver* const compiler_driver_;
-
CodeGenerator* const code_generator_;
// The compilation unit of the current method being compiled. Note that
@@ -348,6 +334,10 @@ class HInstructionBuilder : public ValueObject {
ScopedArenaVector<HBasicBlock*> loop_headers_;
+ // Cached resolved types for the current compilation unit's DexFile.
+ // The Handle<>s reference entries in `handles_`.
+ ScopedArenaSafeMap<dex::TypeIndex, Handle<mirror::Class>> class_cache_;
+
static constexpr int kDefaultNumberOfLoops = 2;
DISALLOW_COPY_AND_ASSIGN(HInstructionBuilder);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index bb96c211cb..a433d7ef73 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -372,7 +372,7 @@ void InstructionSimplifierVisitor::VisitShift(HBinaryOperation* instruction) {
// (as defined by shift semantics). This ensures other
// optimizations do not need to special-case such situations.
DCHECK_EQ(shift_amount->GetType(), DataType::Type::kInt32);
- instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index */ 1);
+ instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index= */ 1);
RecordSimplification();
return;
}
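// Worked example (sketch): for an int32 shift the mask is 31, so a constant
// shift amount of 37 becomes 37 & 31 == 5, matching dex shift semantics.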
@@ -749,8 +749,8 @@ static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* allocator, HInstr
return new (allocator) HBelowOrEqual(rhs, lhs);
default:
LOG(FATAL) << "Unknown ConditionType " << cond->GetKind();
+ UNREACHABLE();
}
- return nullptr;
}
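// The LOG(FATAL) + UNREACHABLE() pairing marks the default case as
// non-returning, which is why the dead `return nullptr;` after the switch can
// be dropped without a missing-return warning; the same pattern in miniature:
//   default: LOG(FATAL) << "Unknown kind"; UNREACHABLE();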
static bool CmpHasBoolType(HInstruction* input, HInstruction* cmp) {
@@ -1181,8 +1181,7 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct
HInstruction* input = instruction->GetInput();
DataType::Type input_type = input->GetType();
DataType::Type result_type = instruction->GetResultType();
- if (DataType::IsTypeConversionImplicit(input_type, result_type)) {
- // Remove the implicit conversion; this includes conversion to the same type.
+ if (instruction->IsImplicitConversion()) {
instruction->ReplaceWith(input);
instruction->GetBlock()->RemoveInstruction(instruction);
RecordSimplification();
@@ -1317,7 +1316,7 @@ void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) {
}
HNeg* neg = left_is_neg ? left->AsNeg() : right->AsNeg();
- if ((left_is_neg ^ right_is_neg) && neg->HasOnlyOneNonEnvironmentUse()) {
+ if (left_is_neg != right_is_neg && neg->HasOnlyOneNonEnvironmentUse()) {
// Replace code looking like
// NEG tmp, b
// ADD dst, a, tmp
@@ -2290,7 +2289,7 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction)
// the invoke, as we would need to look it up in the current dex file, and it
// is unlikely that it exists. The most common situation for such typed
// arraycopy methods is a direct pointer to the boot image.
- HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_);
+ invoke->SetDispatchInfo(HSharpening::SharpenInvokeStaticOrDirect(method, codegen_));
}
}
}
@@ -2362,17 +2361,17 @@ void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) {
ArenaAllocator* allocator = GetGraph()->GetAllocator();
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength, HBoundsCheck and HArrayGet.
- HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length= */ true);
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
HBoundsCheck* bounds_check = new (allocator) HBoundsCheck(
- index, length, dex_pc, /* is_string_char_at */ true);
+ index, length, dex_pc, /* is_string_char_at= */ true);
invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
HArrayGet* array_get = new (allocator) HArrayGet(str,
bounds_check,
DataType::Type::kUint16,
SideEffects::None(), // Strings are immutable.
dex_pc,
- /* is_string_char_at */ true);
+ /* is_string_char_at= */ true);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
GetGraph()->SetHasBoundsChecks(true);
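// Net shape of the rewrite (sketch): `s.charAt(i)` lowers to
//   len = HArrayLength(s, /* is_string_length= */ true)
//   idx = HBoundsCheck(i, len, /* is_string_char_at= */ true)
//   c   = HArrayGet(s, idx, kUint16)  // SideEffects::None(): strings are immutable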
@@ -2384,7 +2383,7 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength.
HArrayLength* length =
- new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length= */ true);
HInstruction* replacement;
if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
// For String.isEmpty(), create the `HEqual` representing the `length == 0`.
@@ -2535,28 +2534,28 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
SimplifySystemArrayCopy(instruction);
break;
case Intrinsics::kIntegerRotateRight:
- SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt32);
+ SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt32);
break;
case Intrinsics::kLongRotateRight:
- SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt64);
+ SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt64);
break;
case Intrinsics::kIntegerRotateLeft:
- SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt32);
+ SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt32);
break;
case Intrinsics::kLongRotateLeft:
- SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt64);
+ SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt64);
break;
case Intrinsics::kIntegerCompare:
- SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt32);
+ SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt32);
break;
case Intrinsics::kLongCompare:
- SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt64);
+ SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt64);
break;
case Intrinsics::kIntegerSignum:
- SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt32);
+ SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt32);
break;
case Intrinsics::kLongSignum:
- SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt64);
+ SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt64);
break;
case Intrinsics::kFloatIsNaN:
case Intrinsics::kDoubleIsNaN:
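
For reference, the rotate intrinsics dispatched above are rewritten into HRor
nodes. A C++ sketch of the 32-bit rotation semantics the rewrite must preserve
(plain scalar code, not ART internals):

#include <cstdint>

// Integer.rotateRight semantics: the distance is taken mod 32; the extra
// "& 31" mask avoids undefined behavior when d % 32 == 0.
uint32_t RotateRight32(uint32_t x, uint32_t d) {
  d &= 31u;
  return (x >> d) | (x << ((32u - d) & 31u));
}

// Integer.rotateLeft(x, d) equals rotateRight(x, 32 - d), which is why both
// directions can be lowered to a single HRor with an adjusted distance.
uint32_t RotateLeft32(uint32_t x, uint32_t d) {
  return RotateRight32(x, (32u - (d & 31u)) & 31u);
}
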
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 24fbb6cb4c..01e9cff6d8 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -43,11 +43,11 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge);
bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
}
bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
}
/**
@@ -202,6 +202,11 @@ void InstructionSimplifierArmVisitor::VisitArrayGet(HArrayGet* instruction) {
return;
}
+ // TODO: Support intermediate address for object arrays on arm.
+ if (type == DataType::Type::kReference) {
+ return;
+ }
+
if (type == DataType::Type::kInt64
|| type == DataType::Type::kFloat32
|| type == DataType::Type::kFloat64) {
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index b536cb4dc4..e23decbd71 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -45,11 +45,11 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
HInstruction* bitfield_op,
bool do_merge);
bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
}
bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
}
/**
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index ccdcb3532d..0f30f662cd 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -245,11 +245,11 @@ bool TryExtractArrayAccessAddress(HInstruction* access,
return false;
}
if (kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier &&
access->IsArrayGet() &&
access->GetType() == DataType::Type::kReference) {
- // For object arrays, the read barrier instrumentation requires
+ // For object arrays, the non-Baker read barrier instrumentation requires
// the original array pointer.
- // TODO: This can be relaxed for Baker CC.
return false;
}
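
A sketch of the address shape TryExtractArrayAccessAddress produces, and why
the check above can now admit Baker read barriers: only the non-Baker
instrumentation needs the original array pointer for reference loads.
Illustrative C++ under simplified assumptions, not ART code:

#include <cstdint>

// The base "array + data_offset" (HIntermediateAddress) is computed once and
// can be shared by several accesses; each access adds only the scaled index.
uint32_t LoadWordElement(uintptr_t array, uintptr_t data_offset, uintptr_t index) {
  uintptr_t intermediate = array + data_offset;  // HIntermediateAddress
  return *reinterpret_cast<const uint32_t*>(intermediate + index * 4u);  // HArrayGet
}
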
diff --git a/compiler/optimizing/instruction_simplifier_x86.cc b/compiler/optimizing/instruction_simplifier_x86.cc
new file mode 100644
index 0000000000..2d8f94a85b
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86.cc
@@ -0,0 +1,88 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86.h"
+#include "instruction_simplifier_x86_shared.h"
+#include "code_generator_x86.h"
+
+namespace art {
+
+namespace x86 {
+
+class InstructionSimplifierX86Visitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierX86Visitor(HGraph* graph,
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph),
+ codegen_(down_cast<CodeGeneratorX86*>(codegen)),
+ stats_(stats) {}
+
+ void RecordSimplification() {
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
+ }
+
+ bool HasAVX2() {
+ return codegen_->GetInstructionSetFeatures().HasAVX2();
+ }
+
+ void VisitBasicBlock(HBasicBlock* block) override {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInBlock()) {
+ instruction->Accept(this);
+ }
+ }
+ }
+
+ void VisitAnd(HAnd* instruction) override;
+ void VisitXor(HXor* instruction) override;
+
+ private:
+ CodeGeneratorX86* codegen_;
+ OptimizingCompilerStats* stats_;
+};
+
+
+void InstructionSimplifierX86Visitor::VisitAnd(HAnd* instruction) {
+ if (TryCombineAndNot(instruction)) {
+ RecordSimplification();
+ } else if (instruction->GetResultType() == DataType::Type::kInt32) {
+ if (TryGenerateResetLeastSetBit(instruction)) {
+ RecordSimplification();
+ }
+ }
+}
+
+void InstructionSimplifierX86Visitor::VisitXor(HXor* instruction) {
+ if (instruction->GetResultType() == DataType::Type::kInt32) {
+ if (TryGenerateMaskUptoLeastSetBit(instruction)) {
+ RecordSimplification();
+ }
+ }
+}
+
+bool InstructionSimplifierX86::Run() {
+ InstructionSimplifierX86Visitor visitor(graph_, codegen_, stats_);
+ if (visitor.HasAVX2()) {
+ visitor.VisitReversePostOrder();
+ return true;
+ }
+ return false;
+}
+
+} // namespace x86
+} // namespace art
+
diff --git a/compiler/optimizing/instruction_simplifier_x86.h b/compiler/optimizing/instruction_simplifier_x86.h
new file mode 100644
index 0000000000..6f10006db2
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86.h
@@ -0,0 +1,44 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+class CodeGenerator;
+namespace x86 {
+
+class InstructionSimplifierX86 : public HOptimization {
+ public:
+ InstructionSimplifierX86(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kInstructionSimplifierX86PassName, stats),
+ codegen_(codegen) {}
+
+ static constexpr const char* kInstructionSimplifierX86PassName = "instruction_simplifier_x86";
+
+ bool Run() override;
+
+ private:
+ CodeGenerator* codegen_;
+};
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
diff --git a/compiler/optimizing/instruction_simplifier_x86_64.cc b/compiler/optimizing/instruction_simplifier_x86_64.cc
new file mode 100644
index 0000000000..56c6b414d7
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_64.cc
@@ -0,0 +1,82 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86_64.h"
+#include "instruction_simplifier_x86_shared.h"
+#include "code_generator_x86_64.h"
+
+namespace art {
+
+namespace x86_64 {
+
+class InstructionSimplifierX86_64Visitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierX86_64Visitor(HGraph* graph,
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph),
+ codegen_(down_cast<CodeGeneratorX86_64*>(codegen)),
+ stats_(stats) {}
+
+ void RecordSimplification() {
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
+ }
+
+ bool HasAVX2() {
+ return codegen_->GetInstructionSetFeatures().HasAVX2();
+ }
+
+ void VisitBasicBlock(HBasicBlock* block) override {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInBlock()) {
+ instruction->Accept(this);
+ }
+ }
+ }
+
+ void VisitAnd(HAnd* instruction) override;
+ void VisitXor(HXor* instruction) override;
+
+ private:
+ CodeGeneratorX86_64* codegen_;
+ OptimizingCompilerStats* stats_;
+};
+
+void InstructionSimplifierX86_64Visitor::VisitAnd(HAnd* instruction) {
+ if (TryCombineAndNot(instruction)) {
+ RecordSimplification();
+ } else if (TryGenerateResetLeastSetBit(instruction)) {
+ RecordSimplification();
+ }
+}
+
+
+void InstructionSimplifierX86_64Visitor::VisitXor(HXor* instruction) {
+ if (TryGenerateMaskUptoLeastSetBit(instruction)) {
+ RecordSimplification();
+ }
+}
+
+bool InstructionSimplifierX86_64::Run() {
+ InstructionSimplifierX86_64Visitor visitor(graph_, codegen_, stats_);
+ if (visitor.HasAVX2()) {
+ visitor.VisitReversePostOrder();
+ return true;
+ }
+ return false;
+}
+} // namespace x86_64
+} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_x86_64.h b/compiler/optimizing/instruction_simplifier_x86_64.h
new file mode 100644
index 0000000000..6cae24d11a
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_64.h
@@ -0,0 +1,48 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_64_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_64_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+class CodeGenerator;
+
+namespace x86_64 {
+
+class InstructionSimplifierX86_64 : public HOptimization {
+ public:
+ InstructionSimplifierX86_64(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kInstructionSimplifierX86_64PassName, stats),
+ codegen_(codegen) {}
+
+ static constexpr const char* kInstructionSimplifierX86_64PassName =
+ "instruction_simplifier_x86_64";
+
+ bool Run() override;
+
+ private:
+ CodeGenerator* codegen_;
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_64_H_
+
+
diff --git a/compiler/optimizing/instruction_simplifier_x86_shared.cc b/compiler/optimizing/instruction_simplifier_x86_shared.cc
new file mode 100644
index 0000000000..2805abb2bb
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_shared.cc
@@ -0,0 +1,137 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86_shared.h"
+#include "nodes_x86.h"
+
+namespace art {
+
+bool TryCombineAndNot(HAnd* instruction) {
+ DataType::Type type = instruction->GetType();
+ if (!DataType::IsIntOrLongType(type)) {
+ return false;
+ }
+ // Replace code looking like
+ // Not tmp, y
+ // And dst, x, tmp
+ // with
+ // AndNot dst, x, y
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ // Perform the simplification only when either left or right
+ // is Not. When both are Not, the instruction should be simplified with
+ // De Morgan's laws.
+ if (left->IsNot() ^ right->IsNot()) {
+ bool left_is_not = left->IsNot();
+ HInstruction* other_ins = (left_is_not ? right : left);
+ HNot* not_ins = (left_is_not ? left : right)->AsNot();
+ // Only do the simplification if instruction has only one use
+ // and thus can be safely removed.
+ if (not_ins->HasOnlyOneNonEnvironmentUse()) {
+ ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
+ HX86AndNot* and_not = new (arena) HX86AndNot(type,
+ not_ins->GetInput(),
+ other_ins,
+ instruction->GetDexPc());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, and_not);
+ DCHECK(!not_ins->HasUses());
+ not_ins->GetBlock()->RemoveInstruction(not_ins);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool TryGenerateResetLeastSetBit(HAnd* instruction) {
+ DataType::Type type = instruction->GetType();
+ if (!DataType::IsIntOrLongType(type)) {
+ return false;
+ }
+ // Replace code looking like
+ // Add tmp, x, -1 or Sub tmp, x, 1
+ // And dest, x, tmp
+ // with
+ // MaskOrResetLeastSetBit dest, x
+ HInstruction* candidate = nullptr;
+ HInstruction* other = nullptr;
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ if (AreLeastSetBitInputs(left, right)) {
+ candidate = left;
+ other = right;
+ } else if (AreLeastSetBitInputs(right, left)) {
+ candidate = right;
+ other = left;
+ }
+ if (candidate != nullptr && candidate->HasOnlyOneNonEnvironmentUse()) {
+ ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
+ HX86MaskOrResetLeastSetBit* lsb = new (arena) HX86MaskOrResetLeastSetBit(
+ type, HInstruction::kAnd, other, instruction->GetDexPc());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, lsb);
+ DCHECK(!candidate->HasUses());
+ candidate->GetBlock()->RemoveInstruction(candidate);
+ return true;
+ }
+ return false;
+}
+
+bool TryGenerateMaskUptoLeastSetBit(HXor* instruction) {
+ DataType::Type type = instruction->GetType();
+ if (!DataType::IsIntOrLongType(type)) {
+ return false;
+ }
+ // Replace code looking like
+ // Add tmp, x, -1 or Sub tmp, x, 1
+ // Xor dest, x, tmp
+ // with
+ // MaskOrResetLeastSetBit dest, x
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ HInstruction* other = nullptr;
+ HInstruction* candidate = nullptr;
+ if (AreLeastSetBitInputs(left, right)) {
+ candidate = left;
+ other = right;
+ } else if (AreLeastSetBitInputs(right, left)) {
+ candidate = right;
+ other = left;
+ }
+ if (candidate != nullptr && candidate->HasOnlyOneNonEnvironmentUse()) {
+ ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
+ HX86MaskOrResetLeastSetBit* lsb = new (arena) HX86MaskOrResetLeastSetBit(
+ type, HInstruction::kXor, other, instruction->GetDexPc());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, lsb);
+ DCHECK(!candidate->HasUses());
+ candidate->GetBlock()->RemoveInstruction(candidate);
+ return true;
+ }
+ return false;
+}
+
+bool AreLeastSetBitInputs(HInstruction* to_test, HInstruction* other) {
+ if (to_test->IsAdd()) {
+ HAdd* add = to_test->AsAdd();
+ HConstant* cst = add->GetConstantRight();
+ return cst != nullptr && cst->IsMinusOne() && other == add->GetLeastConstantLeft();
+ }
+ if (to_test->IsSub()) {
+ HSub* sub = to_test->AsSub();
+ HConstant* cst = sub->GetConstantRight();
+ return cst != nullptr && cst->IsOne() && other == sub->GetLeastConstantLeft();
+ }
+ return false;
+}
+
+} // namespace art
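
The three helpers above recognize classic bit-manipulation idioms that map to
single BMI instructions on x86 (ANDN, BLSR, BLSMSK). A scalar C++ sketch of
the patterns being matched; the names mirror the helpers, and the bodies are
the graph shapes the pass looks for:

#include <cstdint>

// TryCombineAndNot: x & ~y becomes HX86AndNot (ANDN).
uint32_t AndNot(uint32_t x, uint32_t y) { return x & ~y; }

// TryGenerateResetLeastSetBit: x & (x - 1) clears the lowest set bit (BLSR).
// AreLeastSetBitInputs accepts both "Sub x, 1" and "Add x, -1" as the
// decrement, since the two forms are equivalent.
uint32_t ResetLeastSetBit(uint32_t x) { return x & (x - 1u); }

// TryGenerateMaskUptoLeastSetBit: x ^ (x - 1) produces a mask covering the
// lowest set bit and every bit below it (BLSMSK).
uint32_t MaskUptoLeastSetBit(uint32_t x) { return x ^ (x - 1u); }
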
diff --git a/compiler/optimizing/instruction_simplifier_x86_shared.h b/compiler/optimizing/instruction_simplifier_x86_shared.h
new file mode 100644
index 0000000000..7f94d7ea4c
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_shared.h
@@ -0,0 +1,29 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_SHARED_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_SHARED_H_
+
+#include "nodes.h"
+
+namespace art {
+bool TryCombineAndNot(HAnd* instruction);
+bool TryGenerateResetLeastSetBit(HAnd* instruction);
+bool TryGenerateMaskUptoLeastSetBit(HXor* instruction);
+bool AreLeastSetBitInputs(HInstruction* to_test, HInstruction* other);
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_SHARED_H_
+
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
index 3c20ad698b..c345624a7a 100644
--- a/compiler/optimizing/intrinsic_objects.cc
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -21,6 +21,7 @@
#include "class_root.h"
#include "handle.h"
#include "obj_ptr-inl.h"
+#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
namespace art {
@@ -29,7 +30,7 @@ static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* se
ClassLinker* class_linker)
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
- self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_loader= */ nullptr);
if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
return nullptr;
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 1407ea92cb..2de0f0c737 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -33,179 +33,6 @@
namespace art {
-// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags.
-#define CHECK_INTRINSICS_ENUM_VALUES(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- static_assert( \
- static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
- "Instrinsics enumeration space overflow.");
-#include "intrinsics_list.h"
- INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
-#undef INTRINSICS_LIST
-#undef CHECK_INTRINSICS_ENUM_VALUES
-
-// Function that returns whether an intrinsic is static/direct or virtual.
-static inline InvokeType GetIntrinsicInvokeType(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kInterface; // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return IsStatic;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kInterface;
-}
-
-// Function that returns whether an intrinsic needs an environment or not.
-static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCache(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kNeedsEnvironmentOrCache; // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return NeedsEnvironmentOrCache;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kNeedsEnvironmentOrCache;
-}
-
-// Function that returns whether an intrinsic has side effects.
-static inline IntrinsicSideEffects GetSideEffects(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kAllSideEffects;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return SideEffects;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kAllSideEffects;
-}
-
-// Function that returns whether an intrinsic can throw exceptions.
-static inline IntrinsicExceptions GetExceptions(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kCanThrow;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return Exceptions;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kCanThrow;
-}
-
-static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Whenever the intrinsic is marked as static, report an error if we find an InvokeVirtual.
- //
- // Whenever the intrinsic is marked as direct and we find an InvokeVirtual, a devirtualization
- // failure occured. We might be in a situation where we have inlined a method that calls an
- // intrinsic, but that method is in a different dex file on which we do not have a
- // verified_method that would have helped the compiler driver sharpen the call. In that case,
- // make sure that the intrinsic is actually for some final method (or in a final class), as
- // otherwise the intrinsics setup is broken.
- //
- // For the last direction, we have intrinsics for virtual functions that will perform a check
- // inline. If the precise type is known, however, the instruction will be sharpened to an
- // InvokeStaticOrDirect.
- InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic);
- InvokeType invoke_type = invoke->GetInvokeType();
-
- switch (intrinsic_type) {
- case kStatic:
- return (invoke_type == kStatic);
-
- case kDirect:
- if (invoke_type == kDirect) {
- return true;
- }
- if (invoke_type == kVirtual) {
- ArtMethod* art_method = invoke->GetResolvedMethod();
- return (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal());
- }
- return false;
-
- case kVirtual:
- // Call might be devirtualized.
- return (invoke_type == kVirtual || invoke_type == kDirect || invoke_type == kInterface);
-
- case kSuper:
- case kInterface:
- case kPolymorphic:
- case kCustom:
- return false;
- }
- LOG(FATAL) << "Unknown intrinsic invoke type: " << intrinsic_type;
- UNREACHABLE();
-}
-
-bool IntrinsicsRecognizer::Recognize(HInvoke* invoke,
- ArtMethod* art_method,
- /*out*/ bool* wrong_invoke_type) {
- if (art_method == nullptr) {
- art_method = invoke->GetResolvedMethod();
- }
- *wrong_invoke_type = false;
- if (art_method == nullptr || !art_method->IsIntrinsic()) {
- return false;
- }
-
- // TODO: b/65872996 The intent is that polymorphic signature methods should
- // be compiler intrinsics. At present, they are only interpreter intrinsics.
- if (art_method->IsPolymorphicSignature()) {
- return false;
- }
-
- Intrinsics intrinsic = static_cast<Intrinsics>(art_method->GetIntrinsic());
- if (CheckInvokeType(intrinsic, invoke) == false) {
- *wrong_invoke_type = true;
- return false;
- }
-
- invoke->SetIntrinsic(intrinsic,
- NeedsEnvironmentOrCache(intrinsic),
- GetSideEffects(intrinsic),
- GetExceptions(intrinsic));
- return true;
-}
-
-bool IntrinsicsRecognizer::Run() {
- bool didRecognize = false;
- ScopedObjectAccess soa(Thread::Current());
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
- inst_it.Advance()) {
- HInstruction* inst = inst_it.Current();
- if (inst->IsInvoke()) {
- bool wrong_invoke_type = false;
- if (Recognize(inst->AsInvoke(), /* art_method */ nullptr, &wrong_invoke_type)) {
- didRecognize = true;
- MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
- } else if (wrong_invoke_type) {
- LOG(WARNING)
- << "Found an intrinsic with unexpected invoke type: "
- << inst->AsInvoke()->GetResolvedMethod()->PrettyMethod() << " "
- << inst->DebugName();
- }
- }
- }
- }
- return didRecognize;
-}
-
std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
switch (intrinsic) {
case Intrinsics::kNone:
@@ -250,7 +77,7 @@ static ObjPtr<mirror::Class> LookupInitializedClass(Thread* self,
const char* descriptor)
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> klass =
- class_linker->LookupClass(self, descriptor, /* class_loader */ nullptr);
+ class_linker->LookupClass(self, descriptor, /* class_loader= */ nullptr);
DCHECK(klass != nullptr);
DCHECK(klass->IsInitialized());
return klass;
@@ -340,14 +167,14 @@ void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
ObjPtr<mirror::Class> cache_class = class_linker->LookupClass(
- self, kIntegerCacheDescriptor, /* class_loader */ nullptr);
+ self, kIntegerCacheDescriptor, /* class_loader= */ nullptr);
DCHECK(cache_class != nullptr);
if (UNLIKELY(!cache_class->IsInitialized())) {
LOG(WARNING) << "Image class " << cache_class->PrettyDescriptor() << " is uninitialized.";
return;
}
ObjPtr<mirror::Class> integer_class =
- class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader */ nullptr);
+ class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader= */ nullptr);
DCHECK(integer_class != nullptr);
if (UNLIKELY(!integer_class->IsInitialized())) {
LOG(WARNING) << "Image class " << integer_class->PrettyDescriptor() << " is uninitialized.";
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 7594d4a50b..ab68cce304 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -24,7 +24,6 @@
namespace art {
-class CompilerDriver;
class DexFile;
// Positive floating-point infinities.
@@ -34,28 +33,6 @@ static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000)
static constexpr uint32_t kNanFloat = 0x7fc00000U;
static constexpr uint64_t kNanDouble = 0x7ff8000000000000;
-// Recognize intrinsics from HInvoke nodes.
-class IntrinsicsRecognizer : public HOptimization {
- public:
- IntrinsicsRecognizer(HGraph* graph,
- OptimizingCompilerStats* stats,
- const char* name = kIntrinsicsRecognizerPassName)
- : HOptimization(graph, name, stats) {}
-
- bool Run() override;
-
- // Static helper that recognizes intrinsic call. Returns true on success.
- // If it fails due to invoke type mismatch, wrong_invoke_type is set.
- // Useful to recognize intrinsics on individual calls outside this full pass.
- static bool Recognize(HInvoke* invoke, ArtMethod* method, /*out*/ bool* wrong_invoke_type)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition";
-
- private:
- DISALLOW_COPY_AND_ASSIGN(IntrinsicsRecognizer);
-};
-
class IntrinsicVisitor : public ValueObject {
public:
virtual ~IntrinsicVisitor() {}
@@ -264,11 +241,15 @@ void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNU
// Defines a list of unreached intrinsics: that is, method calls that are recognized as
// an intrinsic, and then always converted into HIR instructions before they reach any
-// architecture-specific intrinsics code generator.
+// architecture-specific intrinsics code generator. This only applies to non-baseline
+// compilation.
#define UNREACHABLE_INTRINSIC(Arch, Name) \
void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
- LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
- << " should have been converted to HIR"; \
+ if (Runtime::Current()->IsAotCompiler() && \
+ !codegen_->GetCompilerOptions().IsBaseline()) { \
+ LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
+ << " should have been converted to HIR"; \
+ } \
} \
void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) { \
LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
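
With the new guard, the locations-builder half of UNREACHABLE_INTRINSIC only
aborts for non-baseline AOT compilation. A hand-written expansion for one
architecture/intrinsic pair, shown here for illustration only (the real
definitions come from the macro):

// UNREACHABLE_INTRINSIC(X86, IntegerRotateLeft), expanded by hand:
void IntrinsicLocationsBuilderX86::VisitIntegerRotateLeft(HInvoke* invoke) {
  if (Runtime::Current()->IsAotCompiler() &&
      !codegen_->GetCompilerOptions().IsBaseline()) {
    LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic()
               << " should have been converted to HIR";
  }
  // Under baseline compilation the conversion to HIR may not have run, so
  // reaching this builder is tolerated and the invoke falls back to a
  // regular call.
}
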
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index fcd278837f..ec5d17a443 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -272,10 +272,10 @@ void IntrinsicLocationsBuilderARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke
}
void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
void IntrinsicCodeGeneratorARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -286,10 +286,10 @@ void IntrinsicLocationsBuilderARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
}
void IntrinsicCodeGeneratorARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -618,7 +618,7 @@ void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
- GenMathRound(invoke, /* is_double */ true, GetVIXLAssembler());
+ GenMathRound(invoke, /* is_double= */ true, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -626,7 +626,7 @@ void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
- GenMathRound(invoke, /* is_double */ false, GetVIXLAssembler());
+ GenMathRound(invoke, /* is_double= */ false, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -752,13 +752,13 @@ static void GenUnsafeGet(HInvoke* invoke,
trg_loc,
base,
MemOperand(temp.X()),
- /* needs_null_check */ false,
+ /* needs_null_check= */ false,
is_volatile);
} else {
// Other cases.
MemOperand mem_op(base.X(), offset);
if (is_volatile) {
- codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check */ true);
+ codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check= */ true);
} else {
codegen->Load(type, trg, mem_op);
}
@@ -813,22 +813,22 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObjectVolatile(HInvoke* invok
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -896,7 +896,7 @@ static void GenUnsafePut(HInvoke* invoke,
}
if (is_volatile || is_ordered) {
- codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check */ false);
+ codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check= */ false);
} else {
codegen->Store(type, source, mem_op);
}
@@ -911,64 +911,64 @@ static void GenUnsafePut(HInvoke* invoke,
void IntrinsicCodeGeneratorARM64::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt64,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1646,7 +1646,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1662,7 +1662,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -2464,8 +2464,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
src.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// Bail out if the source is not a non primitive array.
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
@@ -2473,8 +2473,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
__ Cbz(temp1, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
// by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2490,8 +2490,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
dest.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
@@ -2507,8 +2507,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
__ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
// by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2526,8 +2526,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
src.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// Note: if heap poisoning is on, we are comparing two unpoisoned references here.
__ Cmp(temp1, temp2);
@@ -2540,8 +2540,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// /* HeapReference<Class> */ temp1 = temp1->super_class_
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
@@ -2624,16 +2624,16 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
src.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// /* HeapReference<Class> */ temp2 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
temp2_loc,
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
__ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
// by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2787,7 +2787,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false);
+ codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -2820,7 +2820,7 @@ void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -2828,7 +2828,7 @@ void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2924,6 +2924,251 @@ void IntrinsicLocationsBuilderARM64::VisitReachabilityFence(HInvoke* invoke) {
void IntrinsicCodeGeneratorARM64::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
+void IntrinsicLocationsBuilderARM64::VisitCRC32Update(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+ return;
+ }
+
+ LocationSummary* locations = new (allocator_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+// Lower the invoke of CRC32.update(int crc, int b).
+void IntrinsicCodeGeneratorARM64::VisitCRC32Update(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+ MacroAssembler* masm = GetVIXLAssembler();
+
+ Register crc = InputRegisterAt(invoke, 0);
+ Register val = InputRegisterAt(invoke, 1);
+ Register out = OutputRegister(invoke);
+
+ // The general algorithm of the CRC32 calculation is:
+ // crc = ~crc
+ // result = crc32_for_byte(crc, b)
+ // crc = ~result
+ // It is directly lowered to three instructions.
+
+ UseScratchRegisterScope temps(masm);
+ Register tmp = temps.AcquireSameSizeAs(out);
+
+ __ Mvn(tmp, crc);
+ __ Crc32b(tmp, tmp, val);
+ __ Mvn(out, tmp);
+}
+
+// Generate code that uses CRC32 instructions to calculate the CRC32
+// value of a byte buffer.
+//
+// Parameters:
+// masm - VIXL macro assembler
+// crc - a register holding the initial CRC value
+// ptr - a register holding the memory address of the bytes
+// length - a register holding the number of bytes to process
+// out - a register receiving the result of the calculation
+static void GenerateCodeForCalculationCRC32ValueOfBytes(MacroAssembler* masm,
+ const Register& crc,
+ const Register& ptr,
+ const Register& length,
+ const Register& out) {
+ // The algorithm of CRC32 of bytes is:
+ // crc = ~crc
+ // process a few first bytes to make the array 8-byte aligned
+ // while array has 8 bytes do:
+ // crc = crc32_of_8bytes(crc, 8_bytes(array))
+ // if array has 4 bytes:
+ // crc = crc32_of_4bytes(crc, 4_bytes(array))
+ // if array has 2 bytes:
+ // crc = crc32_of_2bytes(crc, 2_bytes(array))
+ // if array has a byte:
+ // crc = crc32_of_byte(crc, 1_byte(array))
+ // crc = ~crc
+
+ vixl::aarch64::Label loop, done;
+ vixl::aarch64::Label process_4bytes, process_2bytes, process_1byte;
+ vixl::aarch64::Label aligned2, aligned4, aligned8;
+
+ // Use VIXL scratch registers as the VIXL macro assembler won't use them in
+ // instructions below.
+ UseScratchRegisterScope temps(masm);
+ Register len = temps.AcquireW();
+ Register array_elem = temps.AcquireW();
+
+ __ Mvn(out, crc);
+ __ Mov(len, length);
+
+ __ Tbz(ptr, 0, &aligned2);
+ __ Subs(len, len, 1);
+ __ B(&done, lo);
+ __ Ldrb(array_elem, MemOperand(ptr, 1, PostIndex));
+ __ Crc32b(out, out, array_elem);
+
+ __ Bind(&aligned2);
+ __ Tbz(ptr, 1, &aligned4);
+ __ Subs(len, len, 2);
+ __ B(&process_1byte, lo);
+ __ Ldrh(array_elem, MemOperand(ptr, 2, PostIndex));
+ __ Crc32h(out, out, array_elem);
+
+ __ Bind(&aligned4);
+ __ Tbz(ptr, 2, &aligned8);
+ __ Subs(len, len, 4);
+ __ B(&process_2bytes, lo);
+ __ Ldr(array_elem, MemOperand(ptr, 4, PostIndex));
+ __ Crc32w(out, out, array_elem);
+
+ __ Bind(&aligned8);
+ __ Subs(len, len, 8);
+ // If len < 8, go to process the data by 4 bytes, 2 bytes and a byte.
+ __ B(&process_4bytes, lo);
+
+ // The main loop processing data by 8 bytes.
+ __ Bind(&loop);
+ __ Ldr(array_elem.X(), MemOperand(ptr, 8, PostIndex));
+ __ Subs(len, len, 8);
+ __ Crc32x(out, out, array_elem.X());
+ // If len >= 8, process the next 8 bytes.
+ __ B(&loop, hs);
+
+ // Process the remaining data, which is less than 8 bytes.
+ // The code generated below works with values of len
+ // which come in the range [-8, 0].
+ // The first three bits are used to detect whether 4 bytes or 2 bytes or
+ // a byte can be processed.
+ // The checking order is from bit 2 to bit 0:
+ // bit 2 is set: at least 4 bytes available
+ // bit 1 is set: at least 2 bytes available
+ // bit 0 is set: at least a byte available
+ __ Bind(&process_4bytes);
+ // Goto process_2bytes if less than four bytes available
+ __ Tbz(len, 2, &process_2bytes);
+ __ Ldr(array_elem, MemOperand(ptr, 4, PostIndex));
+ __ Crc32w(out, out, array_elem);
+
+ __ Bind(&process_2bytes);
+ // Goto process_1byte if less than two bytes available
+ __ Tbz(len, 1, &process_1byte);
+ __ Ldrh(array_elem, MemOperand(ptr, 2, PostIndex));
+ __ Crc32h(out, out, array_elem);
+
+ __ Bind(&process_1byte);
+ // Goto done if no bytes available
+ __ Tbz(len, 0, &done);
+ __ Ldrb(array_elem, MemOperand(ptr));
+ __ Crc32b(out, out, array_elem);
+
+ __ Bind(&done);
+ __ Mvn(out, out);
+}
+
+// The threshold for sizes of arrays to use the library provided implementation
+// of CRC32.updateBytes instead of the intrinsic.
+static constexpr int32_t kCRC32UpdateBytesThreshold = 64 * 1024;
+
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+ return;
+ }
+
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RegisterOrConstant(invoke->InputAt(2)));
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateBytes(int crc, byte[] b, int off, int len)
+//
+// Note: The intrinsic is not used if len exceeds a threshold.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+ MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ Register length = WRegisterFrom(locations->InAt(3));
+ __ Cmp(length, kCRC32UpdateBytesThreshold);
+ __ B(slow_path->GetEntryLabel(), hi);
+
+ const uint32_t array_data_offset =
+ mirror::Array::DataOffset(Primitive::kPrimByte).Uint32Value();
+ Register ptr = XRegisterFrom(locations->GetTemp(0));
+ Register array = XRegisterFrom(locations->InAt(1));
+ Location offset = locations->InAt(2);
+ if (offset.IsConstant()) {
+ int32_t offset_value = offset.GetConstant()->AsIntConstant()->GetValue();
+ __ Add(ptr, array, array_data_offset + offset_value);
+ } else {
+ __ Add(ptr, array, array_data_offset);
+ __ Add(ptr, ptr, XRegisterFrom(offset));
+ }
+
+ Register crc = WRegisterFrom(locations->InAt(0));
+ Register out = WRegisterFrom(locations->Out());
+
+ GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+ return;
+ }
+
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateByteBuffer(int crc, long addr, int off, int len)
+//
+// There is no need to generate code checking whether addr is 0:
+// updateByteBuffer is a private method of java.util.zip.CRC32, so it is
+// never called from outside the CRC32 class, and the address passed to
+// the call is always that of a DirectBuffer. An implementation of an
+// empty DirectBuffer may use a zero address, but then its length must
+// also be zero, and the generated code below handles a zero length
+// correctly.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+ MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register addr = XRegisterFrom(locations->InAt(1));
+ Register ptr = XRegisterFrom(locations->GetTemp(0));
+ __ Add(ptr, addr, XRegisterFrom(locations->InAt(2)));
+
+ Register crc = WRegisterFrom(locations->InAt(0));
+ Register length = WRegisterFrom(locations->InAt(3));
+ Register out = WRegisterFrom(locations->Out());
+ GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
+}
+
UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
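
The CRC32 intrinsics above must agree with java.util.zip.CRC32: the reflected
IEEE polynomial 0xEDB88320, with the CRC inverted before and after processing
(the Mvn instructions bracketing the Crc32b/h/w/x sequence implement the
inversions). A bit-at-a-time C++ reference for checking the semantics; a
sketch, not the generated code:

#include <cstddef>
#include <cstdint>

// Bitwise CRC-32 matching java.util.zip.CRC32: invert the incoming CRC, fold
// each byte in LSB-first with the reflected polynomial, then invert again.
// The ARM64 CRC32B/H/W/X instructions compute the same function for 1, 2, 4
// and 8 bytes at a time.
uint32_t Crc32Update(uint32_t crc, const uint8_t* data, size_t len) {
  crc = ~crc;
  for (size_t i = 0; i < len; ++i) {
    crc ^= data[i];
    for (int bit = 0; bit < 8; ++bit) {
      crc = (crc >> 1) ^ ((crc & 1u) ? 0xEDB88320u : 0u);
    }
  }
  return ~crc;
}
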
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index f0a418454d..f0aa92e981 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -229,7 +229,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
assembler->MaybePoisonHeapReference(tmp);
__ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
__ Cmp(src_curr_addr, src_stop_addr);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
__ B(GetExitLabel());
}
@@ -298,10 +298,10 @@ void IntrinsicLocationsBuilderARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invo
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -312,10 +312,10 @@ void IntrinsicLocationsBuilderARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke)
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -355,7 +355,7 @@ static void GenNumberOfLeadingZeros(HInvoke* invoke,
vixl32::Label end;
vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ Clz(out, in_reg_hi);
- __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* far_target */ false);
+ __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* is_far_target= */ false);
__ Clz(out, in_reg_lo);
__ Add(out, out, 32);
if (end.IsReferenced()) {
@@ -398,7 +398,7 @@ static void GenNumberOfTrailingZeros(HInvoke* invoke,
vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ Rbit(out, in_reg_lo);
__ Clz(out, out);
- __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* far_target */ false);
+ __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* is_far_target= */ false);
__ Rbit(out, in_reg_hi);
__ Clz(out, out);
__ Add(out, out, 32);
@@ -446,7 +446,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMathRint(HInvoke* invoke) {
void IntrinsicCodeGeneratorARMVIXL::VisitMathRint(HInvoke* invoke) {
DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
ArmVIXLAssembler* assembler = GetAssembler();
- __ Vrintn(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+ __ Vrintn(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
@@ -476,12 +476,12 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
// For positive, zero or NaN inputs, rounding is done.
__ Cmp(out_reg, 0);
- __ B(ge, final_label, /* far_target */ false);
+ __ B(ge, final_label, /* is_far_target= */ false);
// Handle input < 0 cases.
// If input is negative but not a tie, previous result (round to nearest) is valid.
// If input is a negative tie, change rounding direction to positive infinity, out_reg += 1.
- __ Vrinta(F32, F32, temp1, in_reg);
+ __ Vrinta(F32, temp1, in_reg);
__ Vmov(temp2, 0.5);
__ Vsub(F32, temp1, in_reg, temp1);
__ Vcmp(F32, temp1, temp2);
@@ -642,7 +642,7 @@ static void GenUnsafeGet(HInvoke* invoke,
__ Add(RegisterFrom(temp), base, Operand(offset));
MemOperand src(RegisterFrom(temp), 0);
codegen->GenerateFieldLoadWithBakerReadBarrier(
- invoke, trg_loc, base, src, /* needs_null_check */ false);
+ invoke, trg_loc, base, src, /* needs_null_check= */ false);
if (is_volatile) {
__ Dmb(vixl32::ISH);
}
@@ -733,22 +733,22 @@ void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* inv
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
@@ -778,39 +778,39 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile= */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ true, invoke);
}
static void GenUnsafePut(LocationSummary* locations,
@@ -844,7 +844,7 @@ static void GenUnsafePut(LocationSummary* locations,
__ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg));
__ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg));
__ Cmp(temp_lo, 0);
- __ B(ne, &loop_head, /* far_target */ false);
+ __ B(ne, &loop_head, /* is_far_target= */ false);
} else {
__ Strd(value_lo, value_hi, MemOperand(base, offset));
}
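
For context, ARMv7 guarantees single-copy atomicity for a 64-bit store only through an exclusive Ldrexd/Strexd pair, so the volatile path above retries until Strexd reports success. A host-side C++ analogue of the same guarantee (illustrative, not ART code):

#include <atomic>
#include <cstdint>

// On ARMv7 a C++ compiler lowers this store to a ldrexd/strexd retry loop
// much like the one above: the loop repeats whenever another observer breaks
// the exclusive reservation between the paired load and store.
void AtomicStore64(std::atomic<int64_t>& field, int64_t value) {
  field.store(value, std::memory_order_seq_cst);  // Volatile-store semantics.
}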
@@ -875,64 +875,64 @@ static void GenUnsafePut(LocationSummary* locations,
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1026,7 +1026,7 @@ class BakerReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ Strex(tmp, value, MemOperand(tmp_ptr));
assembler->MaybeUnpoisonHeapReference(value);
__ Cmp(tmp, 0);
- __ B(ne, &loop_head, /* far_target */ false);
+ __ B(ne, &loop_head, /* is_far_target= */ false);
__ B(GetExitLabel());
}
};
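
The Strex/Cmp/B(ne, &loop_head) tail is the usual load-linked/store-conditional retry idiom: the store-exclusive can fail spuriously or because another thread wrote the field, and either way the loop re-reads and retries. A rough C++11 analogue (illustrative, not ART code):

#include <atomic>
#include <cstdint>

bool CompareAndSetRetry(std::atomic<int32_t>& field, int32_t expected, int32_t value) {
  int32_t observed = expected;
  // compare_exchange_weak may fail spuriously, just as Strex may; retry in
  // that case, but give up once the observed value really differs.
  while (!field.compare_exchange_weak(observed, value)) {
    if (observed != expected) {
      return false;
    }
  }
  return true;
}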
@@ -1092,7 +1092,8 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c
assembler->MaybeUnpoisonHeapReference(tmp);
}
__ Subs(tmp, tmp, expected);
- __ B(ne, failure, (failure == loop_exit) ? kNear : kBranchWithoutHint);
+ __ B(ne, failure, /* hint= */ (failure == loop_exit) ? kNear : kBranchWithoutHint);
if (type == DataType::Type::kReference) {
assembler->MaybePoisonHeapReference(value);
}
@@ -1101,7 +1102,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c
assembler->MaybeUnpoisonHeapReference(value);
}
__ Cmp(tmp, 0);
- __ B(ne, &loop_head, /* far_target */ false);
+ __ B(ne, &loop_head, /* is_far_target= */ false);
__ Bind(loop_exit);
@@ -1112,7 +1113,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c
__ Lsr(out, out, WhichPowerOf2(out.GetSizeInBits()));
if (type == DataType::Type::kReference) {
- codegen->MaybeGenerateMarkingRegisterCheck(/* code */ 128);
+ codegen->MaybeGenerateMarkingRegisterCheck(/* code= */ 128);
}
}
@@ -1307,23 +1308,23 @@ static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler,
__ Ldr(temp_reg, MemOperand(str, temp1));
__ Ldr(temp2, MemOperand(arg, temp1));
__ Cmp(temp_reg, temp2);
- __ B(ne, &find_char_diff, /* far_target */ false);
+ __ B(ne, &find_char_diff, /* is_far_target= */ false);
__ Add(temp1, temp1, char_size * 2);
__ Ldr(temp_reg, MemOperand(str, temp1));
__ Ldr(temp2, MemOperand(arg, temp1));
__ Cmp(temp_reg, temp2);
- __ B(ne, &find_char_diff_2nd_cmp, /* far_target */ false);
+ __ B(ne, &find_char_diff_2nd_cmp, /* is_far_target= */ false);
__ Add(temp1, temp1, char_size * 2);
// With string compression, we have compared 8 bytes, otherwise 4 chars.
__ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4));
- __ B(hi, &loop, /* far_target */ false);
+ __ B(hi, &loop, /* is_far_target= */ false);
__ B(end);
__ Bind(&find_char_diff_2nd_cmp);
if (mirror::kUseStringCompression) {
__ Subs(temp0, temp0, 4); // 4 bytes previously compared.
- __ B(ls, end, /* far_target */ false); // Was the second comparison fully beyond the end?
+ __ B(ls, end, /* is_far_target= */ false); // Was the second comparison fully beyond the end?
} else {
// Without string compression, we can start treating temp0 as signed
// and rely on the signed comparison below.
@@ -1351,7 +1352,7 @@ static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler,
// the remaining string data, so just return length diff (out).
// The comparison is unsigned for string compression, otherwise signed.
__ Cmp(temp0, Operand(temp1, vixl32::LSR, (mirror::kUseStringCompression ? 3 : 4)));
- __ B((mirror::kUseStringCompression ? ls : le), end, /* far_target */ false);
+ __ B((mirror::kUseStringCompression ? ls : le), end, /* is_far_target= */ false);
// Extract the characters and calculate the difference.
if (mirror::kUseStringCompression) {
@@ -1418,9 +1419,9 @@ static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler,
__ Ldrb(temp_reg, MemOperand(temp1, c_char_size, PostIndex));
__ Ldrh(temp3, MemOperand(temp2, char_size, PostIndex));
__ Cmp(temp_reg, temp3);
- __ B(ne, &different_compression_diff, /* far_target */ false);
+ __ B(ne, &different_compression_diff, /* is_far_target= */ false);
__ Subs(temp0, temp0, 2);
- __ B(hi, &different_compression_loop, /* far_target */ false);
+ __ B(hi, &different_compression_loop, /* is_far_target= */ false);
__ B(end);
// Calculate the difference.
@@ -1516,12 +1517,12 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
StringEqualsOptimizations optimizations(invoke);
if (!optimizations.GetArgumentNotNull()) {
// Check if the input is null; return false if it is.
- __ CompareAndBranchIfZero(arg, &return_false, /* far_target */ false);
+ __ CompareAndBranchIfZero(arg, &return_false, /* is_far_target= */ false);
}
// Reference equality check, return true if same reference.
__ Cmp(str, arg);
- __ B(eq, &return_true, /* far_target */ false);
+ __ B(eq, &return_true, /* is_far_target= */ false);
if (!optimizations.GetArgumentIsString()) {
// Instanceof check for the argument by comparing class fields.
@@ -1539,7 +1540,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
// Also, because we use the previously loaded class references only in the
// following comparison, we don't need to unpoison them.
__ Cmp(temp, out);
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
}
// Check if one of the inputs is a const string. Do not special-case both strings
@@ -1562,7 +1563,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
// Also compare the compression style; if it differs, return false.
__ Ldr(temp, MemOperand(arg, count_offset));
__ Cmp(temp, Operand(mirror::String::GetFlaggedCount(const_string_length, is_compressed)));
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
} else {
// Load `count` fields of this and argument strings.
__ Ldr(temp, MemOperand(str, count_offset));
@@ -1570,7 +1571,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
// Check if `count` fields are equal, return false if they're not.
// Also compare the compression style; if it differs, return false.
__ Cmp(temp, out);
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
}
// Assertions that must hold in order to compare strings 4 bytes at a time.
@@ -1593,9 +1594,9 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
__ Ldrd(temp, temp1, MemOperand(str, offset));
__ Ldrd(temp2, out, MemOperand(arg, offset));
__ Cmp(temp, temp2);
- __ B(ne, &return_false, /* far_label */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
__ Cmp(temp1, out);
- __ B(ne, &return_false, /* far_label */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
offset += 2u * sizeof(uint32_t);
remaining_bytes -= 2u * sizeof(uint32_t);
}
@@ -1603,13 +1604,13 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
__ Ldr(temp, MemOperand(str, offset));
__ Ldr(out, MemOperand(arg, offset));
__ Cmp(temp, out);
- __ B(ne, &return_false, /* far_label */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
}
} else {
// Return true if both strings are empty. Even with string compression `count == 0` means empty.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- __ CompareAndBranchIfZero(temp, &return_true, /* far_target */ false);
+ __ CompareAndBranchIfZero(temp, &return_true, /* is_far_target= */ false);
if (mirror::kUseStringCompression) {
// For string compression, calculate the number of bytes to compare (not chars).
@@ -1635,10 +1636,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
__ Ldr(temp2, MemOperand(arg, temp1));
__ Add(temp1, temp1, Operand::From(sizeof(uint32_t)));
__ Cmp(out, temp2);
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
// With string compression, we have compared 4 bytes, otherwise 2 chars.
__ Subs(temp, temp, mirror::kUseStringCompression ? 4 : 2);
- __ B(hi, &loop, /* far_target */ false);
+ __ B(hi, &loop, /* is_far_target= */ false);
}
// Return true and exit the function.
@@ -1719,7 +1720,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1735,7 +1736,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke)
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1957,7 +1958,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
} else {
if (!optimizations.GetDestinationIsSource()) {
__ Cmp(src, dest);
- __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+ __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
}
__ Cmp(RegisterFrom(dest_pos), src_pos_constant);
__ B(gt, intrinsic_slow_path->GetEntryLabel());
@@ -1965,7 +1966,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
} else {
if (!optimizations.GetDestinationIsSource()) {
__ Cmp(src, dest);
- __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+ __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
}
if (dest_pos.IsConstant()) {
int32_t dest_pos_constant = Int32ConstantFrom(dest_pos);
@@ -2025,11 +2026,11 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
// Bail out if the source is not a non-primitive array.
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp1, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
// by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2041,7 +2042,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check= */ false);
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non-primitive array.
@@ -2053,7 +2054,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// temporaries such as `temp1`.
// /* HeapReference<Class> */ temp2 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+ invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
// by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2067,16 +2068,16 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+ invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check= */ false);
// Note: if heap poisoning is on, we are comparing two unpoisoned references here.
__ Cmp(temp1, temp2);
if (optimizations.GetDestinationIsTypedObjectArray()) {
vixl32::Label do_copy;
- __ B(eq, &do_copy, /* far_target */ false);
+ __ B(eq, &do_copy, /* is_far_target= */ false);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
// /* HeapReference<Class> */ temp1 = temp1->super_class_
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
@@ -2133,7 +2134,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
if (optimizations.GetDestinationIsTypedObjectArray()) {
vixl32::Label do_copy;
- __ B(eq, &do_copy, /* far_target */ false);
+ __ B(eq, &do_copy, /* is_far_target= */ false);
if (!did_unpoison) {
assembler->MaybeUnpoisonHeapReference(temp1);
}
@@ -2155,10 +2156,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
// /* HeapReference<Class> */ temp3 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp3` has been unpoisoned
// by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2186,7 +2187,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
if (length.IsRegister()) {
// Don't enter the copy loop if the length is zero.
- __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target */ false);
+ __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false);
}
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -2263,7 +2264,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
__ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
}
__ Cmp(temp1, temp3);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
__ Bind(read_barrier_slow_path->GetExitLabel());
} else {
@@ -2285,13 +2286,13 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
__ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
}
__ Cmp(temp1, temp3);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
}
__ Bind(&done);
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -2821,7 +2822,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Subs(num_chr, srcEnd, srcBegin);
// Early out for valid zero-length retrievals.
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// src range to copy.
__ Add(src_ptr, srcObj, value_offset);
@@ -2837,7 +2838,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Ldr(temp, MemOperand(srcObj, count_offset));
__ Tst(temp, 1);
temps.Release(temp);
- __ B(eq, &compressed_string_preloop, /* far_target */ false);
+ __ B(eq, &compressed_string_preloop, /* is_far_target= */ false);
}
__ Add(src_ptr, src_ptr, Operand(srcBegin, vixl32::LSL, 1));
@@ -2847,7 +2848,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
temp = temps.Acquire();
// Save repairing the value of num_chr on the < 4 character path.
__ Subs(temp, num_chr, 4);
- __ B(lt, &remainder, /* far_target */ false);
+ __ B(lt, &remainder, /* is_far_target= */ false);
// Keep the result of the earlier subs, we are going to fetch at least 4 characters.
__ Mov(num_chr, temp);
@@ -2862,10 +2863,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Ldr(temp, MemOperand(src_ptr, char_size * 4, PostIndex));
__ Str(temp, MemOperand(dst_ptr, char_size * 4, PostIndex));
temps.Release(temp);
- __ B(ge, &loop, /* far_target */ false);
+ __ B(ge, &loop, /* is_far_target= */ false);
__ Adds(num_chr, num_chr, 4);
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// Main loop for < 4 character case and remainder handling. Loads and stores one
// 16-bit Java character at a time.
@@ -2875,7 +2876,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Subs(num_chr, num_chr, 1);
__ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
temps.Release(temp);
- __ B(gt, &remainder, /* far_target */ false);
+ __ B(gt, &remainder, /* is_far_target= */ false);
if (mirror::kUseStringCompression) {
__ B(final_label);
@@ -2891,7 +2892,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
temps.Release(temp);
__ Subs(num_chr, num_chr, 1);
- __ B(gt, &compressed_string_loop, /* far_target */ false);
+ __ B(gt, &compressed_string_loop, /* is_far_target= */ false);
}
if (done.IsReferenced()) {
@@ -2952,7 +2953,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) {
void IntrinsicCodeGeneratorARMVIXL::VisitMathCeil(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
- __ Vrintp(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+ __ Vrintp(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
@@ -2964,7 +2965,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
- __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+ __ Vrintm(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
@@ -3011,7 +3012,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
__ Add(out, in, -info.low);
__ Cmp(out, info.length);
vixl32::Label allocate, done;
- __ B(hs, &allocate, /* is_far_target */ false);
+ __ B(hs, &allocate, /* is_far_target= */ false);
// If the value is within the bounds, load the j.l.Integer directly from the array.
codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
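
The Add(out, in, -info.low) followed by an unsigned Cmp/B(hs) is the classic single-comparison range check for the Integer cache: after subtracting the low bound, one unsigned compare rejects both values below the cache and values at or past its end. A minimal sketch (illustrative C++; the names are not ART's):

#include <cstdint>

bool InIntegerCache(int32_t v, int32_t low, int32_t length) {
  // If v < low the subtraction wraps to a large unsigned value, so the same
  // unsigned compare also catches the lower bound.
  return static_cast<uint32_t>(v) - static_cast<uint32_t>(low) <
         static_cast<uint32_t>(length);
}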
@@ -3044,7 +3045,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
vixl32::Register temp = temps.Acquire();
vixl32::Label done;
vixl32::Label* const final_label = codegen_->GetFinalLabel(invoke, &done);
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
__ Dmb(vixl32::ISH);
__ Mov(temp, 0);
assembler->StoreToOffset(kStoreWord, temp, tr, offset);
@@ -3066,6 +3067,9 @@ UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 2ca12b6533..3da0e578bf 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -185,7 +185,7 @@ void IntrinsicLocationsBuilderMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invo
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// int java.lang.Float.floatToRawIntBits(float)
@@ -194,7 +194,7 @@ void IntrinsicLocationsBuilderMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke)
}
void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -226,7 +226,7 @@ void IntrinsicLocationsBuilderMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke)
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// float java.lang.Float.intBitsToFloat(int)
@@ -235,7 +235,7 @@ void IntrinsicLocationsBuilderMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator,
@@ -411,7 +411,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
DataType::Type::kInt32,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ false,
+ /* reverseBits= */ false,
GetAssembler());
}
@@ -425,7 +425,7 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) {
DataType::Type::kInt64,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ false,
+ /* reverseBits= */ false,
GetAssembler());
}
@@ -439,7 +439,7 @@ void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) {
DataType::Type::kInt16,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ false,
+ /* reverseBits= */ false,
GetAssembler());
}
@@ -479,7 +479,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* in
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
}
// int java.lang.Long.numberOfLeadingZeros(long i)
@@ -488,7 +488,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invok
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
}
static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -566,7 +566,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* i
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
}
// int java.lang.Long.numberOfTrailingZeros(long i)
@@ -575,7 +575,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invo
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
}
// int java.lang.Integer.reverse(int)
@@ -588,7 +588,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) {
DataType::Type::kInt32,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ true,
+ /* reverseBits= */ true,
GetAssembler());
}
@@ -602,7 +602,7 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) {
DataType::Type::kInt64,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ true,
+ /* reverseBits= */ true,
GetAssembler());
}
@@ -1055,11 +1055,11 @@ static void GenUnsafeGet(HInvoke* invoke,
codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
trg_loc,
base,
- /* offset */ 0U,
- /* index */ offset_loc,
+ /* offset= */ 0U,
+ /* index= */ offset_loc,
TIMES_1,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
if (is_volatile) {
__ Sync(0);
}
@@ -1077,8 +1077,8 @@ static void GenUnsafeGet(HInvoke* invoke,
trg_loc,
trg_loc,
base_loc,
- /* offset */ 0U,
- /* index */ offset_loc);
+ /* offset= */ 0U,
+ /* index= */ offset_loc);
}
} else {
if (is_R6) {
@@ -1107,7 +1107,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, IsR6(), codegen_);
}
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -1116,7 +1116,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, IsR6(), codegen_);
}
// long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -1125,7 +1125,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, IsR6(), codegen_);
}
// Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -1134,7 +1134,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, IsR6(), codegen_);
}
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -1143,7 +1143,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, IsR6(), codegen_);
}
static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1225,8 +1225,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1239,8 +1239,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
IsR6(),
codegen_);
}
@@ -1253,8 +1253,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1267,8 +1267,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1281,8 +1281,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke)
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
IsR6(),
codegen_);
}
@@ -1295,8 +1295,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1309,8 +1309,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1323,8 +1323,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
IsR6(),
codegen_);
}
@@ -1388,12 +1388,12 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS* code
invoke,
out_loc, // Unused as an output; serves only as a "temporary" within the read barrier.
base,
- /* offset */ 0u,
- /* index */ offset_loc,
+ /* offset= */ 0u,
+ /* index= */ offset_loc,
ScaleFactor::TIMES_1,
temp,
- /* needs_null_check */ false,
- /* always_update_field */ true);
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true);
}
}
@@ -1714,7 +1714,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_);
+ GenerateStringIndexOf(invoke, /* start_at_zero= */ true, GetAssembler(), codegen_);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1735,7 +1735,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_);
+ GenerateStringIndexOf(invoke, /* start_at_zero= */ false, GetAssembler(), codegen_);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -2704,6 +2704,10 @@ UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateByteBuffer)
+
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferAppend);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index cbe3b42cbf..3e687652d3 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -169,7 +169,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* in
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// int java.lang.Float.floatToRawIntBits(float)
@@ -178,7 +178,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invok
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -205,7 +205,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invok
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// float java.lang.Float.intBitsToFloat(int)
@@ -214,7 +214,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke)
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -295,7 +295,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke*
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
// int java.lang.Long.numberOfLeadingZeros(long i)
@@ -304,7 +304,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* inv
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -332,7 +332,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke*
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
// int java.lang.Long.numberOfTrailingZeros(long i)
@@ -341,7 +341,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* in
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
static void GenReverse(LocationSummary* locations,
@@ -911,11 +911,11 @@ static void GenUnsafeGet(HInvoke* invoke,
codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
trg_loc,
base,
- /* offset */ 0U,
- /* index */ offset_loc,
+ /* offset= */ 0U,
+ /* index= */ offset_loc,
TIMES_1,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
if (is_volatile) {
__ Sync(0);
}
@@ -928,8 +928,8 @@ static void GenUnsafeGet(HInvoke* invoke,
trg_loc,
trg_loc,
base_loc,
- /* offset */ 0U,
- /* index */ offset_loc);
+ /* offset= */ 0U,
+ /* index= */ offset_loc);
}
} else {
__ Lwu(trg, TMP, 0);
@@ -952,7 +952,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -961,7 +961,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
// long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -970,7 +970,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
@@ -979,7 +979,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
// Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -988,7 +988,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -997,7 +997,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invo
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1067,8 +1067,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1080,8 +1080,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
@@ -1093,8 +1093,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1106,8 +1106,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1119,8 +1119,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invok
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
@@ -1132,8 +1132,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invo
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1145,8 +1145,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1158,8 +1158,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke)
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
@@ -1171,8 +1171,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1234,12 +1234,12 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS64* co
invoke,
out_loc, // Unused as an output; serves only as a "temporary" within the read barrier.
base,
- /* offset */ 0u,
- /* index */ offset_loc,
+ /* offset= */ 0u,
+ /* index= */ offset_loc,
ScaleFactor::TIMES_1,
temp,
- /* needs_null_check */ false,
- /* always_update_field */ true);
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true);
}
}
@@ -1556,7 +1556,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1574,7 +1574,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -1675,7 +1675,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
// boolean java.lang.Double.isInfinite(double)
@@ -1684,7 +1684,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
@@ -2354,6 +2354,9 @@ void IntrinsicCodeGeneratorMIPS64::VisitReachabilityFence(HInvoke* invoke ATTRIB
UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 3b23798758..de697f0f96 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -223,31 +223,31 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86Assembler*
}
void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ true);
}
void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ true);
}
void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ false);
}
void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ false);
}
void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -545,6 +545,96 @@ static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorX86* codegen, QuickEntry
__ cfi().AdjustCFAOffset(-16);
}
+static void CreateLowestOneBitLocations(ArenaAllocator* allocator, bool is_long, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+ if (is_long) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ } else {
+ locations->SetInAt(0, Location::Any());
+ }
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+static void GenLowestOneBit(X86Assembler* assembler,
+ CodeGeneratorX86* codegen,
+ bool is_long,
+ HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Location src = locations->InAt(0);
+ Location out_loc = locations->Out();
+
+ if (invoke->InputAt(0)->IsConstant()) {
+ // Evaluate this at compile time.
+ int64_t value = Int64FromConstant(invoke->InputAt(0)->AsConstant());
+ if (value == 0) {
+ if (is_long) {
+ __ xorl(out_loc.AsRegisterPairLow<Register>(), out_loc.AsRegisterPairLow<Register>());
+ __ xorl(out_loc.AsRegisterPairHigh<Register>(), out_loc.AsRegisterPairHigh<Register>());
+ } else {
+ __ xorl(out_loc.AsRegister<Register>(), out_loc.AsRegister<Register>());
+ }
+ return;
+ }
+ // Nonzero value.
+ value = is_long ? CTZ(static_cast<uint64_t>(value))
+ : CTZ(static_cast<uint32_t>(value));
+ if (is_long) {
+ if (value >= 32) {
+ int shift = value - 32;
+ codegen->Load32BitValue(out_loc.AsRegisterPairLow<Register>(), 0);
+ codegen->Load32BitValue(out_loc.AsRegisterPairHigh<Register>(), 1 << shift);
+ } else {
+ codegen->Load32BitValue(out_loc.AsRegisterPairLow<Register>(), 1 << value);
+ codegen->Load32BitValue(out_loc.AsRegisterPairHigh<Register>(), 0);
+ }
+ } else {
+ codegen->Load32BitValue(out_loc.AsRegister<Register>(), 1 << value);
+ }
+ return;
+ }
+ // Handle the non-constant case.
+ if (is_long) {
+ DCHECK(src.IsRegisterPair());
+ Register src_lo = src.AsRegisterPairLow<Register>();
+ Register src_hi = src.AsRegisterPairHigh<Register>();
+
+ Register out_lo = out_loc.AsRegisterPairLow<Register>();
+ Register out_hi = out_loc.AsRegisterPairHigh<Register>();
+
+ __ movl(out_lo, src_lo);
+ __ movl(out_hi, src_hi);
+
+ __ negl(out_lo);
+ __ adcl(out_hi, Immediate(0));
+ __ negl(out_hi);
+
+ __ andl(out_lo, src_lo);
+ __ andl(out_hi, src_hi);
+ } else {
+ if (codegen->GetInstructionSetFeatures().HasAVX2() && src.IsRegister()) {
+ Register out = out_loc.AsRegister<Register>();
+ __ blsi(out, src.AsRegister<Register>());
+ } else {
+ Register out = out_loc.AsRegister<Register>();
+ // Compute out = src & -src to isolate the lowest set bit.
+ if (src.IsRegister()) {
+ __ movl(out, src.AsRegister<Register>());
+ } else {
+ DCHECK(src.IsStackSlot());
+ __ movl(out, Address(ESP, src.GetStackIndex()));
+ }
+ __ negl(out);
+
+ if (src.IsRegister()) {
+ __ andl(out, src.AsRegister<Register>());
+ } else {
+ __ andl(out, Address(ESP, src.GetStackIndex()));
+ }
+ }
+ }
+}
+
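
The non-constant paths above all compute the two's-complement identity lowestOneBit(x) == x & -x (blsi does exactly this in one instruction); on 32-bit x86 the 64-bit negation is built from negl/adcl/negl across the register pair. A host-side sketch of both paths (illustrative C++, not ART code):

#include <cstdint>

// Negation flips every bit above the lowest set bit and preserves that bit,
// so the AND isolates it; 0 maps to 0.
uint32_t LowestOneBit32(uint32_t x) { return x & (0u - x); }

// 64-bit value held as two 32-bit halves, mirroring the register-pair path:
// negate the low half, propagate the carry into the high half, negate it,
// then AND both halves with the source.
uint64_t LowestOneBit64(uint32_t lo, uint32_t hi) {
  uint32_t out_lo = 0u - lo;                 // negl out_lo
  uint32_t out_hi = 0u - (hi + (lo != 0u));  // adcl out_hi, 0; negl out_hi
  out_lo &= lo;
  out_hi &= hi;
  return (static_cast<uint64_t>(out_hi) << 32) | out_lo;
}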
void IntrinsicLocationsBuilderX86::VisitMathCos(HInvoke* invoke) {
CreateFPToFPCallLocations(allocator_, invoke);
}
@@ -657,6 +747,21 @@ void IntrinsicCodeGeneratorX86::VisitMathTanh(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
+void IntrinsicLocationsBuilderX86::VisitIntegerLowestOneBit(HInvoke* invoke) {
+ CreateLowestOneBitLocations(allocator_, /*is_long=*/ false, invoke);
+}
+void IntrinsicCodeGeneratorX86::VisitIntegerLowestOneBit(HInvoke* invoke) {
+ GenLowestOneBit(GetAssembler(), codegen_, /*is_long=*/ false, invoke);
+}
+
+void IntrinsicLocationsBuilderX86::VisitLongLowestOneBit(HInvoke* invoke) {
+ CreateLowestOneBitLocations(allocator_, /*is_long=*/ true, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitLongLowestOneBit(HInvoke* invoke) {
+ GenLowestOneBit(GetAssembler(), codegen_, /*is_long=*/ true, invoke);
+}
+
static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
@@ -1220,19 +1325,19 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1594,7 +1699,7 @@ static void GenUnsafeGet(HInvoke* invoke,
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
- invoke, output_loc, base, src, /* needs_null_check */ false);
+ invoke, output_loc, base, src, /* needs_null_check= */ false);
} else {
__ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
codegen->GenerateReadBarrierSlow(
@@ -1665,45 +1770,45 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true);
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ true);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
@@ -1730,39 +1835,39 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator
void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ true);
}
// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -1814,34 +1919,34 @@ static void GenUnsafePut(LocationSummary* locations,
}
void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -1938,8 +2043,8 @@ static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codeg
temp1_loc, // Unused, used only as a "temporary" within the read barrier.
base,
field_addr,
- /* needs_null_check */ false,
- /* always_update_field */ true,
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true,
&temp2);
}
@@ -2170,19 +2275,19 @@ static void GenBitCount(X86Assembler* assembler,
}
void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) {
- CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) {
- CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2274,19 +2379,19 @@ static void GenLeadingZeros(X86Assembler* assembler,
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2365,19 +2470,19 @@ static void GenTrailingZeros(X86Assembler* assembler,
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) {
@@ -2585,11 +2690,11 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
// Bail out if the source is not a non-primitive array.
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(temp1, temp1);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2622,7 +2727,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non-primitive array.
@@ -2634,7 +2739,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// temporaries such as `temp1`.
// /* HeapReference<Class> */ temp2 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp2_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(temp2, temp2);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
@@ -2647,7 +2752,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
// Note: if heap poisoning is on, we are comparing two unpoisoned references here.
__ cmpl(temp1, temp2);
@@ -2656,7 +2761,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
__ j(kEqual, &do_copy);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
// comparison with null below, and this reference is not
@@ -2710,10 +2815,10 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(temp1, temp1);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2846,7 +2951,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -2973,8 +3078,9 @@ UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 0469b02129..e79c0c9adf 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -162,10 +162,10 @@ void IntrinsicLocationsBuilderX86_64::VisitDoubleLongBitsToDouble(HInvoke* invok
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -176,10 +176,10 @@ void IntrinsicLocationsBuilderX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke)
}
void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -430,12 +430,12 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
// direct x86 instruction, since NaN should map to 0 and large positive
// values need to be clipped to the extreme value.
codegen_->Load64BitValue(out, kPrimLongMax);
- __ cvtsi2sd(t2, out, /* is64bit */ true);
+ __ cvtsi2sd(t2, out, /* is64bit= */ true);
__ comisd(t1, t2);
__ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
__ movl(out, Immediate(0)); // does not change flags, implicit zero extension to 64-bit
__ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
- __ cvttsd2si(out, t1, /* is64bit */ true);
+ __ cvttsd2si(out, t1, /* is64bit= */ true);
__ Bind(&done);
}
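As a hedged scalar model of just the fragment above (the NaN and upper-range handling for rounding a double to long; the full intrinsic adds 0.5 and handles the lower range elsewhere): cvttsd2si alone would produce the x86 "integer indefinite" value on NaN or overflow, so the code pre-clamps with comisd. Names below are illustrative, not ART code.

#include <cstdint>
#include <cmath>
#include <limits>

// t1 is the value to convert; out is preloaded with kPrimLongMax as above.
static int64_t ConvertClamped(double t1) {
  const int64_t kPrimLongMax = std::numeric_limits<int64_t>::max();
  double t2 = static_cast<double>(kPrimLongMax);  // cvtsi2sd
  if (t1 >= t2) return kPrimLongMax;              // comisd + jae: clip to max
  if (std::isnan(t1)) return 0;                   // unordered case: NaN maps to 0
  return static_cast<int64_t>(t1);                // cvttsd2si
}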
@@ -979,7 +979,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
// Register `temp1` is not trashed by the read barrier emitted
// by GenerateFieldLoadWithBakerReadBarrier below, as that
// method produces a call to a ReadBarrierMarkRegX entry point,
@@ -987,7 +987,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// temporaries such as `temp1`.
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
// If heap poisoning is enabled, `temp1` and `temp2` have been
// unpoisoned by the previous calls to
// GenerateFieldLoadWithBakerReadBarrier.
@@ -1011,7 +1011,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ TMP = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1034,7 +1034,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ TMP = temp2->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, TMP_loc, temp2, component_offset, /* needs_null_check */ false);
+ invoke, TMP_loc, temp2, component_offset, /* needs_null_check= */ false);
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1058,7 +1058,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
// comparison with null below, and this reference is not
@@ -1086,10 +1086,10 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
// /* HeapReference<Class> */ TMP = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
} else {
@@ -1198,7 +1198,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -1452,7 +1452,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// Ensure we have a start index >= 0.
__ xorl(counter, counter);
__ cmpl(start_index, Immediate(0));
- __ cmov(kGreater, counter, start_index, /* is64bit */ false); // 32-bit copy is enough.
+ __ cmov(kGreater, counter, start_index, /* is64bit= */ false); // 32-bit copy is enough.
if (mirror::kUseStringCompression) {
NearLabel modify_counter, offset_uncompressed_label;
@@ -1514,19 +1514,19 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1840,7 +1840,7 @@ void IntrinsicLocationsBuilderX86_64::VisitThreadCurrentThread(HInvoke* invoke)
void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64PointerSize>(),
- /* no_rip */ true));
+ /* no_rip= */ true));
}
static void GenUnsafeGet(HInvoke* invoke,
@@ -1866,7 +1866,7 @@ static void GenUnsafeGet(HInvoke* invoke,
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
- invoke, output_loc, base, src, /* needs_null_check */ false);
+ invoke, output_loc, base, src, /* needs_null_check= */ false);
} else {
__ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
codegen->GenerateReadBarrierSlow(
@@ -1930,22 +1930,22 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invo
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
@@ -2028,34 +2028,34 @@ static void GenUnsafePut(LocationSummary* locations, DataType::Type type, bool i
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePut(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLong(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -2140,8 +2140,8 @@ static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86_64* co
out_loc, // Unused, used only as a "temporary" within the read barrier.
base,
field_addr,
- /* needs_null_check */ false,
- /* always_update_field */ true,
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true,
&temp1,
&temp2);
}
@@ -2369,7 +2369,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerBitCount(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
@@ -2377,7 +2377,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) {
@@ -2421,93 +2421,98 @@ static void GenOneBit(X86_64Assembler* assembler,
}
// Handle the non-constant cases.
- CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
- if (is_high) {
- // Use architectural support: basically 1 << bsr.
- if (src.IsRegister()) {
+ if (!is_high && codegen->GetInstructionSetFeatures().HasAVX2() &&
+ src.IsRegister()) {
+ __ blsi(out, src.AsRegister<CpuRegister>());
+ } else {
+ CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ if (is_high) {
+ // Use architectural support: basically 1 << bsr.
+ if (src.IsRegister()) {
+ if (is_long) {
+ __ bsrq(tmp, src.AsRegister<CpuRegister>());
+ } else {
+ __ bsrl(tmp, src.AsRegister<CpuRegister>());
+ }
+ } else if (is_long) {
+ DCHECK(src.IsDoubleStackSlot());
+ __ bsrq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ } else {
+ DCHECK(src.IsStackSlot());
+ __ bsrl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ }
+ // BSR sets ZF if the input was zero.
+ NearLabel is_zero, done;
+ __ j(kEqual, &is_zero);
+ __ movl(out, Immediate(1)); // Clears upper bits too.
if (is_long) {
- __ bsrq(tmp, src.AsRegister<CpuRegister>());
+ __ shlq(out, tmp);
} else {
- __ bsrl(tmp, src.AsRegister<CpuRegister>());
+ __ shll(out, tmp);
}
- } else if (is_long) {
- DCHECK(src.IsDoubleStackSlot());
- __ bsrq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- } else {
- DCHECK(src.IsStackSlot());
- __ bsrl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- }
- // BSR sets ZF if the input was zero.
- NearLabel is_zero, done;
- __ j(kEqual, &is_zero);
- __ movl(out, Immediate(1)); // Clears upper bits too.
- if (is_long) {
- __ shlq(out, tmp);
- } else {
- __ shll(out, tmp);
- }
- __ jmp(&done);
- __ Bind(&is_zero);
- __ xorl(out, out); // Clears upper bits too.
- __ Bind(&done);
- } else {
- // Copy input into temporary.
- if (src.IsRegister()) {
+ __ jmp(&done);
+ __ Bind(&is_zero);
+ __ xorl(out, out); // Clears upper bits too.
+ __ Bind(&done);
+ } else {
+ // Copy input into temporary.
+ if (src.IsRegister()) {
+ if (is_long) {
+ __ movq(tmp, src.AsRegister<CpuRegister>());
+ } else {
+ __ movl(tmp, src.AsRegister<CpuRegister>());
+ }
+ } else if (is_long) {
+ DCHECK(src.IsDoubleStackSlot());
+ __ movq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ } else {
+ DCHECK(src.IsStackSlot());
+ __ movl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ }
+ // Do the bit twiddling: basically tmp & -tmp.
if (is_long) {
- __ movq(tmp, src.AsRegister<CpuRegister>());
+ __ movq(out, tmp);
+ __ negq(tmp);
+ __ andq(out, tmp);
} else {
- __ movl(tmp, src.AsRegister<CpuRegister>());
+ __ movl(out, tmp);
+ __ negl(tmp);
+ __ andl(out, tmp);
}
- } else if (is_long) {
- DCHECK(src.IsDoubleStackSlot());
- __ movq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- } else {
- DCHECK(src.IsStackSlot());
- __ movl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- }
- // Do the bit twiddling: basically tmp & -tmp;
- if (is_long) {
- __ movq(out, tmp);
- __ negq(tmp);
- __ andq(out, tmp);
- } else {
- __ movl(out, tmp);
- __ negl(tmp);
- __ andl(out, tmp);
}
}
}
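For intuition, a portable C++ model of the "basically 1 << bsr" path in GenOneBit (assuming GCC/Clang builtins; not ART code): BSR yields the index of the most significant set bit and sets ZF for a zero input, which the NearLabel dance above turns into a zero result.

#include <cstdint>

static uint32_t HighestOneBit32(uint32_t x) {
  if (x == 0) return 0;                                            // ZF path: xorl out, out
  unsigned msb = 31u - static_cast<unsigned>(__builtin_clz(x));    // ~ bsrl
  return 1u << msb;                                                // movl 1; shll by msb
}

static uint64_t HighestOneBit64(uint64_t x) {
  if (x == 0) return 0;
  unsigned msb = 63u - static_cast<unsigned>(__builtin_clzll(x));  // ~ bsrq
  return 1ull << msb;                                              // shlq by msb
}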
void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ false);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ true);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ false);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ true);
}
static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2572,7 +2577,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke*
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2580,7 +2585,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* inv
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2640,7 +2645,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke*
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2648,7 +2653,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* in
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2719,7 +2724,7 @@ void IntrinsicCodeGeneratorX86_64::VisitThreadInterrupted(HInvoke* invoke) {
X86_64Assembler* assembler = GetAssembler();
CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
Address address = Address::Absolute
- (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip */ true);
+ (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip= */ true);
NearLabel done;
__ gs()->movl(out, address);
__ testl(out, out);
@@ -2740,6 +2745,9 @@ void IntrinsicCodeGeneratorX86_64::VisitReachabilityFence(HInvoke* invoke ATTRIB
UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 7f71745a43..b33d0f488e 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -692,7 +692,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
VisitSetLocation(instruction, idx, instruction->InputAt(2));
}
- void VisitDeoptimize(HDeoptimize* instruction) {
+ void VisitDeoptimize(HDeoptimize* instruction) override {
const ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
for (HInstruction* heap_value : heap_values) {
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 7d66155b39..12b180d5ff 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -351,7 +351,10 @@ static bool HasReductionFormat(HInstruction* reduction, HInstruction* phi) {
// Translates vector operation to reduction kind.
static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
- if (reduction->IsVecAdd() || reduction->IsVecSub() || reduction->IsVecSADAccumulate()) {
+ if (reduction->IsVecAdd() ||
+ reduction->IsVecSub() ||
+ reduction->IsVecSADAccumulate() ||
+ reduction->IsVecDotProd()) {
return HVecReduce::kSum;
}
LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
@@ -431,6 +434,23 @@ static void PeelByCount(HLoopInformation* loop_info, int count) {
}
}
+// Returns the narrower of the types of instructions a and b, looking through type conversions.
+static DataType::Type GetNarrowerType(HInstruction* a, HInstruction* b) {
+ DataType::Type type = a->GetType();
+ if (DataType::Size(b->GetType()) < DataType::Size(type)) {
+ type = b->GetType();
+ }
+ if (a->IsTypeConversion() &&
+ DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(type)) {
+ type = a->InputAt(0)->GetType();
+ }
+ if (b->IsTypeConversion() &&
+ DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(type)) {
+ type = b->InputAt(0)->GetType();
+ }
+ return type;
+}
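A hedged illustration of what this helper computes (plain integer sizes stand in for DataType::Size; not compiler code): when both inputs are widening conversions, the result is the pre-conversion type, which is what lets the idiom matchers below pick packed byte or halfword forms.

#include <algorithm>

// Mirror of the walk in GetNarrowerType: take the smaller of the two
// operand sizes, then look through a conversion on either side.
struct Node { int size; bool is_conversion; int input_size; };
static int NarrowerSize(const Node& a, const Node& b) {
  int s = std::min(a.size, b.size);
  if (a.is_conversion) s = std::min(s, a.input_size);
  if (b.is_conversion) s = std::min(s, b.input_size);
  return s;  // e.g. two int32 widenings of int8 inputs -> size 1
}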
+
//
// Public methods.
//
@@ -1289,6 +1309,7 @@ bool HLoopOptimization::VectorizeDef(LoopNode* node,
DataType::Type type = instruction->GetType();
// Recognize SAD idiom or direct reduction.
if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) ||
+ VectorizeDotProdIdiom(node, instruction, generate_code, type, restrictions) ||
(TrySetVectorType(type, &restrictions) &&
VectorizeUse(node, instruction, generate_code, type, restrictions))) {
if (generate_code) {
@@ -1531,11 +1552,11 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |= kNoDiv | kNoReduction;
+ *restrictions |= kNoDiv | kNoReduction | kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction;
+ *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoDotProd;
return TrySetVectorLength(4);
case DataType::Type::kInt32:
*restrictions |= kNoDiv | kNoWideSAD;
@@ -1580,12 +1601,23 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |=
- kNoMul | kNoDiv | kNoShift | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+ *restrictions |= kNoMul |
+ kNoDiv |
+ kNoShift |
+ kNoAbs |
+ kNoSignedHAdd |
+ kNoUnroundedHAdd |
+ kNoSAD |
+ kNoDotProd;
return TrySetVectorLength(16);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+ *restrictions |= kNoDiv |
+ kNoAbs |
+ kNoSignedHAdd |
+ kNoUnroundedHAdd |
+ kNoSAD |
+ kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
*restrictions |= kNoDiv | kNoSAD;
@@ -1610,11 +1642,11 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoDotProd;
return TrySetVectorLength(16);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoStringCharAt;
+ *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
*restrictions |= kNoDiv;
@@ -1639,11 +1671,11 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoDotProd;
return TrySetVectorLength(16);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoStringCharAt;
+ *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
*restrictions |= kNoDiv;
@@ -2071,18 +2103,7 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
HInstruction* r = a;
HInstruction* s = b;
bool is_unsigned = false;
- DataType::Type sub_type = a->GetType();
- if (DataType::Size(b->GetType()) < DataType::Size(sub_type)) {
- sub_type = b->GetType();
- }
- if (a->IsTypeConversion() &&
- DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
- sub_type = a->InputAt(0)->GetType();
- }
- if (b->IsTypeConversion() &&
- DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
- sub_type = b->InputAt(0)->GetType();
- }
+ DataType::Type sub_type = GetNarrowerType(a, b);
if (reduction_type != sub_type &&
(!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
return false;
@@ -2123,6 +2144,75 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
return false;
}
+// Method recognizes the following dot product idiom:
+//   q += a * b for operands a, b whose type is narrower than the reduction type,
+// provided that the operands either have the same type or are promoted to a wider
+// form. Since this may involve a vector length change, the idiom is handled by going
+// directly to a dot product node (rather than relying on combining finer-grained
+// nodes later).
+bool HLoopOptimization::VectorizeDotProdIdiom(LoopNode* node,
+ HInstruction* instruction,
+ bool generate_code,
+ DataType::Type reduction_type,
+ uint64_t restrictions) {
+ if (!instruction->IsAdd() || (reduction_type != DataType::Type::kInt32)) {
+ return false;
+ }
+
+ HInstruction* q = instruction->InputAt(0);
+ HInstruction* v = instruction->InputAt(1);
+ if (!v->IsMul() || v->GetType() != reduction_type) {
+ return false;
+ }
+
+ HInstruction* a = v->InputAt(0);
+ HInstruction* b = v->InputAt(1);
+ HInstruction* r = a;
+ HInstruction* s = b;
+ DataType::Type op_type = GetNarrowerType(a, b);
+ bool is_unsigned = false;
+
+ if (!IsNarrowerOperands(a, b, op_type, &r, &s, &is_unsigned)) {
+ return false;
+ }
+ op_type = HVecOperation::ToProperType(op_type, is_unsigned);
+
+ if (!TrySetVectorType(op_type, &restrictions) ||
+ HasVectorRestrictions(restrictions, kNoDotProd)) {
+ return false;
+ }
+
+ DCHECK(r != nullptr && s != nullptr);
+ // Accept dot product idiom for vectorizable operands. Vectorized code uses the shorthand
+ // idiomatic operation. Sequential code uses the original scalar expressions.
+ if (generate_code && vector_mode_ != kVector) { // de-idiom
+ r = a;
+ s = b;
+ }
+ if (VectorizeUse(node, q, generate_code, op_type, restrictions) &&
+ VectorizeUse(node, r, generate_code, op_type, restrictions) &&
+ VectorizeUse(node, s, generate_code, op_type, restrictions)) {
+ if (generate_code) {
+ if (vector_mode_ == kVector) {
+ vector_map_->Put(instruction, new (global_allocator_) HVecDotProd(
+ global_allocator_,
+ vector_map_->Get(q),
+ vector_map_->Get(r),
+ vector_map_->Get(s),
+ reduction_type,
+ is_unsigned,
+ GetOtherVL(reduction_type, op_type, vector_length_),
+ kNoDexPc));
+ MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
+ } else {
+ GenerateVecOp(v, vector_map_->Get(r), vector_map_->Get(s), reduction_type);
+ GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
+ }
+ }
+ return true;
+ }
+ return false;
+}
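For context, a hedged C++ rendering of the scalar loop shape this matcher targets (array names illustrative): an Add feeding the reduction whose right-hand input is a Mul over operands narrower than the kInt32 reduction type. When vectorized, each vector step becomes a single HVecDotProd node instead of separate widen, multiply, and add operations, on targets whose backend does not set kNoDotProd.

#include <cstdint>
#include <cstddef>

int32_t DotProd(const int8_t* a, const int8_t* b, size_t n) {
  int32_t q = 0;
  for (size_t i = 0; i < n; ++i) {
    // q += a * b, with int8 operands promoted to the int32 reduction type.
    q += static_cast<int32_t>(a[i]) * static_cast<int32_t>(b[i]);
  }
  return q;
}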
+
//
// Vectorization heuristics.
//
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 2b202fda75..1a842c4bf3 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -82,6 +82,7 @@ class HLoopOptimization : public HOptimization {
kNoReduction = 1 << 9, // no reduction
kNoSAD = 1 << 10, // no sum of absolute differences (SAD)
kNoWideSAD = 1 << 11, // no sum of absolute differences (SAD) with operand widening
+ kNoDotProd = 1 << 12, // no dot product
};
/*
@@ -217,6 +218,11 @@ class HLoopOptimization : public HOptimization {
bool generate_code,
DataType::Type type,
uint64_t restrictions);
+ bool VectorizeDotProdIdiom(LoopNode* node,
+ HInstruction* instruction,
+ bool generate_code,
+ DataType::Type type,
+ uint64_t restrictions);
// Vectorization heuristics.
Alignment ComputeAlignment(HInstruction* offset,
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index c7cc661303..310d98b5b0 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -30,7 +30,7 @@ class LoopOptimizationTest : public OptimizingUnitTest {
: graph_(CreateGraph()),
iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
loop_opt_(new (GetAllocator()) HLoopOptimization(
- graph_, /* compiler_options */ nullptr, iva_, /* stats */ nullptr)) {
+ graph_, /* compiler_options= */ nullptr, iva_, /* stats= */ nullptr)) {
BuildGraph();
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 79a7e2c858..f7c16d1d02 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -20,6 +20,7 @@
#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/bit_vector-inl.h"
+#include "base/logging.h"
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "class_root.h"
@@ -43,7 +44,7 @@ void HGraph::InitializeInexactObjectRTI(VariableSizedHandleScope* handles) {
// Create the inexact Object reference type and store it in the HGraph.
inexact_object_rti_ = ReferenceTypeInfo::Create(
handles->NewHandle(GetClassRoot<mirror::Object>()),
- /* is_exact */ false);
+ /* is_exact= */ false);
}
void HGraph::AddBlock(HBasicBlock* block) {
@@ -59,7 +60,7 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
ScopedArenaAllocator allocator(GetArenaStack());
// Nodes that we're currently visiting, indexed by block id.
ArenaBitVector visiting(
- &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder);
+ &allocator, blocks_.size(), /* expandable= */ false, kArenaAllocGraphBuilder);
visiting.ClearAllBits();
// Number of successors visited from a given node, indexed by block id.
ScopedArenaVector<size_t> successors_visited(blocks_.size(),
@@ -688,7 +689,7 @@ HCurrentMethod* HGraph::GetCurrentMethod() {
}
const char* HGraph::GetMethodName() const {
- const DexFile::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
+ const dex::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
return dex_file_.GetMethodName(method_id);
}
@@ -825,7 +826,7 @@ void HLoopInformation::Populate() {
ScopedArenaAllocator allocator(graph->GetArenaStack());
ArenaBitVector visited(&allocator,
graph->GetBlocks().size(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocGraphBuilder);
visited.ClearAllBits();
// Stop marking blocks at the loop header.
@@ -1230,7 +1231,7 @@ bool HInstructionList::FoundBefore(const HInstruction* instruction1,
}
}
LOG(FATAL) << "Did not find an order between two instructions of the same block.";
- return true;
+ UNREACHABLE();
}
bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const {
@@ -1253,7 +1254,7 @@ bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const {
} else {
// There is no order among phis.
LOG(FATAL) << "There is no dominance between phis of a same block.";
- return false;
+ UNREACHABLE();
}
} else {
// `this` is not a phi.
@@ -2526,7 +2527,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
current->SetGraph(outer_graph);
outer_graph->AddBlock(current);
outer_graph->reverse_post_order_[++index_of_at] = current;
- UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge */ false);
+ UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge= */ false);
}
}
@@ -2536,7 +2537,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
outer_graph->reverse_post_order_[++index_of_at] = to;
// Only `to` can become a back edge, as the inlined blocks
// are predecessors of `to`.
- UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true);
+ UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge= */ true);
// Update all predecessors of the exit block (now the `to` block)
// to not `HReturn` but `HGoto` instead. Special case throwing blocks
@@ -2710,13 +2711,13 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
!old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
UpdateLoopAndTryInformationOfNewBlock(
- if_block, old_pre_header, /* replace_if_back_edge */ false);
+ if_block, old_pre_header, /* replace_if_back_edge= */ false);
UpdateLoopAndTryInformationOfNewBlock(
- true_block, old_pre_header, /* replace_if_back_edge */ false);
+ true_block, old_pre_header, /* replace_if_back_edge= */ false);
UpdateLoopAndTryInformationOfNewBlock(
- false_block, old_pre_header, /* replace_if_back_edge */ false);
+ false_block, old_pre_header, /* replace_if_back_edge= */ false);
UpdateLoopAndTryInformationOfNewBlock(
- new_pre_header, old_pre_header, /* replace_if_back_edge */ false);
+ new_pre_header, old_pre_header, /* replace_if_back_edge= */ false);
}
HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
@@ -3180,4 +3181,77 @@ std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind) {
}
}
+// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags.
+#define CHECK_INTRINSICS_ENUM_VALUES(Name, InvokeType, _, SideEffects, Exceptions, ...) \
+ static_assert( \
+ static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
+ "Instrinsics enumeration space overflow.");
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
+#undef INTRINSICS_LIST
+#undef CHECK_INTRINSICS_ENUM_VALUES
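A hedged note on the bound in that static_assert: kAccIntrinsicBits is a contiguous mask inside the ArtMethod access-flags word, so shifting the mask down by its own trailing-zero count yields the largest ordinal the field can hold. The mask value below is hypothetical, purely to show the arithmetic.

#include <cstdint>

constexpr uint32_t Ctz(uint32_t x) { return __builtin_ctz(x); }  // GCC/Clang builtin
constexpr uint32_t kMask = 0x7ff00000u;                // stand-in for kAccIntrinsicBits
constexpr uint32_t kMaxOrdinal = kMask >> Ctz(kMask);  // 0x7ff == 2047
static_assert(kMaxOrdinal == 2047, "an 11-bit field holds ordinals 0..2047");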
+
+// Function that returns whether an intrinsic needs an environment or not.
+static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCacheIntrinsic(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kNeedsEnvironmentOrCache;  // Nonsensical for an intrinsic.
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+ case Intrinsics::k ## Name: \
+ return NeedsEnvOrCache;
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kNeedsEnvironmentOrCache;
+}
+
+// Function that returns whether an intrinsic has side effects.
+static inline IntrinsicSideEffects GetSideEffectsIntrinsic(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kAllSideEffects;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+ case Intrinsics::k ## Name: \
+ return SideEffects;
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kAllSideEffects;
+}
+
+// Function that returns whether an intrinsic can throw exceptions.
+static inline IntrinsicExceptions GetExceptionsIntrinsic(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kCanThrow;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+ case Intrinsics::k ## Name: \
+ return Exceptions;
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kCanThrow;
+}
+
+void HInvoke::SetResolvedMethod(ArtMethod* method) {
+ // TODO: b/65872996 The intent is that polymorphic signature methods should
+ // be compiler intrinsics. At present, they are only interpreter intrinsics.
+ if (method != nullptr &&
+ method->IsIntrinsic() &&
+ !method->IsPolymorphicSignature()) {
+ Intrinsics intrinsic = static_cast<Intrinsics>(method->GetIntrinsic());
+ SetIntrinsic(intrinsic,
+ NeedsEnvironmentOrCacheIntrinsic(intrinsic),
+ GetSideEffectsIntrinsic(intrinsic),
+ GetExceptionsIntrinsic(intrinsic));
+ }
+ resolved_method_ = method;
+}
+
} // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 68f1a2406a..c70674b0ad 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -26,9 +26,11 @@
#include "base/arena_object.h"
#include "base/array_ref.h"
#include "base/iteration_range.h"
+#include "base/mutex.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/transform_array_ref.h"
+#include "art_method.h"
#include "data_type.h"
#include "deoptimization_kind.h"
#include "dex/dex_file.h"
@@ -128,6 +130,7 @@ enum GraphAnalysisResult {
kAnalysisInvalidBytecode,
kAnalysisFailThrowCatchLoop,
kAnalysisFailAmbiguousArrayOp,
+ kAnalysisFailIrreducibleLoopAndStringInit,
kAnalysisSuccess,
};
@@ -314,6 +317,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
uint32_t method_idx,
InstructionSet instruction_set,
InvokeType invoke_type = kInvalidInvokeType,
+ bool dead_reference_safe = false,
bool debuggable = false,
bool osr = false,
int start_instruction_id = 0)
@@ -333,6 +337,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
has_simd_(false),
has_loops_(false),
has_irreducible_loops_(false),
+ dead_reference_safe_(dead_reference_safe),
debuggable_(debuggable),
current_instruction_id_(start_instruction_id),
dex_file_(dex_file),
@@ -523,6 +528,12 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
has_bounds_checks_ = value;
}
+ // Is the code known to be robust against eliminating dead references
+ // and the effects of early finalization?
+ bool IsDeadReferenceSafe() const { return dead_reference_safe_; }
+
+ void MarkDeadReferenceUnsafe() { dead_reference_safe_ = false; }
+
bool IsDebuggable() const { return debuggable_; }
// Returns a constant of the given type and value. If it does not exist
@@ -701,6 +712,14 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// so there might be false positives.
bool has_irreducible_loops_;
+ // Is the code known to be robust against eliminating dead references
+ // and the effects of early finalization? If false, dead reference variables
+ // are kept if they might be visible to the garbage collector.
+ // Currently this means that the class was declared to be dead-reference-safe,
+ // the method accesses no reachability-sensitive fields or data, and the same
+ // is true for any methods that were inlined into the current one.
+ bool dead_reference_safe_;
+
// Indicates whether the graph should be compiled in a way that
// ensures full debuggability. If false, we can apply more
// aggressive optimizations that may limit the level of debugging.
@@ -892,7 +911,7 @@ class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
explicit TryCatchInformation(const HTryBoundary& try_entry)
: try_entry_(&try_entry),
catch_dex_file_(nullptr),
- catch_type_index_(DexFile::kDexNoIndex16) {
+ catch_type_index_(dex::TypeIndex::Invalid()) {
DCHECK(try_entry_ != nullptr);
}
@@ -911,9 +930,9 @@ class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
bool IsCatchBlock() const { return catch_dex_file_ != nullptr; }
- bool IsCatchAllTypeIndex() const {
+ bool IsValidTypeIndex() const {
DCHECK(IsCatchBlock());
- return !catch_type_index_.IsValid();
+ return catch_type_index_.IsValid();
}
dex::TypeIndex GetCatchTypeIndex() const {
@@ -926,6 +945,10 @@ class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
return *catch_dex_file_;
}
+ void SetInvalidTypeIndex() {
+ catch_type_index_ = dex::TypeIndex::Invalid();
+ }
+
private:
// One of possibly several TryBoundary instructions entering the block's try.
// Only set for try blocks.
@@ -933,7 +956,7 @@ class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
// Exception type information. Only set for catch blocks.
const DexFile* catch_dex_file_;
- const dex::TypeIndex catch_type_index_;
+ dex::TypeIndex catch_type_index_;
};
static constexpr size_t kNoLifetime = -1;
@@ -1453,6 +1476,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(VecSetScalars, VecOperation) \
M(VecMultiplyAccumulate, VecOperation) \
M(VecSADAccumulate, VecOperation) \
+ M(VecDotProd, VecOperation) \
M(VecLoad, VecMemoryOperation) \
M(VecStore, VecMemoryOperation) \
@@ -1494,6 +1518,14 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(X86PackedSwitch, Instruction)
#endif
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M) \
+ M(X86AndNot, Instruction) \
+ M(X86MaskOrResetLeastSetBit, Instruction)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
+#endif
+
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -1504,7 +1536,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M) \
FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
- FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
#define FOR_EACH_ABSTRACT_INSTRUCTION(M) \
M(Condition, BinaryOperation) \
@@ -3229,7 +3262,7 @@ class HDeoptimize final : public HVariableInputSizeInstruction {
SideEffects::All(),
dex_pc,
allocator,
- /* number_of_inputs */ 1,
+ /* number_of_inputs= */ 1,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(false);
SetPackedField<DeoptimizeKindField>(kind);
@@ -3254,7 +3287,7 @@ class HDeoptimize final : public HVariableInputSizeInstruction {
SideEffects::CanTriggerGC(),
dex_pc,
allocator,
- /* number_of_inputs */ 2,
+ /* number_of_inputs= */ 2,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(true);
SetPackedField<DeoptimizeKindField>(kind);
@@ -4322,7 +4355,7 @@ class HInvoke : public HVariableInputSizeInstruction {
bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
ArtMethod* GetResolvedMethod() const { return resolved_method_; }
- void SetResolvedMethod(ArtMethod* method) { resolved_method_ = method; }
+ void SetResolvedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
DECLARE_ABSTRACT_INSTRUCTION(Invoke);
@@ -4354,12 +4387,14 @@ class HInvoke : public HVariableInputSizeInstruction {
number_of_arguments + number_of_other_inputs,
kArenaAllocInvokeInputs),
number_of_arguments_(number_of_arguments),
- resolved_method_(resolved_method),
dex_method_index_(dex_method_index),
intrinsic_(Intrinsics::kNone),
intrinsic_optimizations_(0) {
SetPackedField<InvokeTypeField>(invoke_type);
SetPackedFlag<kFlagCanThrow>(true);
+    // Check the mutator lock; constructors lack annotalysis support.
+ Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
+ SetResolvedMethod(resolved_method);
}
DEFAULT_COPY_CONSTRUCTOR(Invoke);
@@ -4384,7 +4419,7 @@ class HInvokeUnresolved final : public HInvoke {
: HInvoke(kInvokeUnresolved,
allocator,
number_of_arguments,
- 0u /* number_of_other_inputs */,
+ /* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
dex_method_index,
@@ -4410,7 +4445,7 @@ class HInvokePolymorphic final : public HInvoke {
: HInvoke(kInvokePolymorphic,
allocator,
number_of_arguments,
- 0u /* number_of_other_inputs */,
+ /* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
dex_method_index,
@@ -4436,11 +4471,11 @@ class HInvokeCustom final : public HInvoke {
: HInvoke(kInvokeCustom,
allocator,
number_of_arguments,
- /* number_of_other_inputs */ 0u,
+ /* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
- /* dex_method_index */ dex::kDexNoIndex,
- /* resolved_method */ nullptr,
+ /* dex_method_index= */ dex::kDexNoIndex,
+ /* resolved_method= */ nullptr,
kStatic),
call_site_index_(call_site_index) {
}
@@ -4533,8 +4568,7 @@ class HInvokeStaticOrDirect final : public HInvoke {
allocator,
number_of_arguments,
// There is potentially one extra argument for the HCurrentMethod node, and
- // potentially one other if the clinit check is explicit, and potentially
- // one other if the method is a string factory.
+ // potentially one other if the clinit check is explicit.
(NeedsCurrentMethodInput(dispatch_info.method_load_kind) ? 1u : 0u) +
(clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u),
return_type,
@@ -4845,10 +4879,11 @@ class HNeg final : public HUnaryOperation {
class HNewArray final : public HExpression<2> {
public:
- HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
+ HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc, size_t component_size_shift)
: HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, cls);
SetRawInputAt(1, length);
+ SetPackedField<ComponentSizeShiftField>(component_size_shift);
}
bool IsClonable() const override { return true; }
@@ -4870,10 +4905,23 @@ class HNewArray final : public HExpression<2> {
return InputAt(1);
}
+ size_t GetComponentSizeShift() {
+ return GetPackedField<ComponentSizeShiftField>();
+ }
+
DECLARE_INSTRUCTION(NewArray);
protected:
DEFAULT_COPY_CONSTRUCTOR(NewArray);
+
+ private:
+ static constexpr size_t kFieldComponentSizeShift = kNumberOfGenericPackedBits;
+ static constexpr size_t kFieldComponentSizeShiftSize = MinimumBitsToStore(3u);
+ static constexpr size_t kNumberOfNewArrayPackedBits =
+ kFieldComponentSizeShift + kFieldComponentSizeShiftSize;
+ static_assert(kNumberOfNewArrayPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+ using ComponentSizeShiftField =
+      BitField<size_t, kFieldComponentSizeShift, kFieldComponentSizeShiftSize>;
};
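To make the 2-bit budget above concrete: the stored shift is log2 of the array component size in bytes, which for Java primitive and reference components is at most 8, so legal shifts are 0 through 3 and MinimumBitsToStore(3u) == 2. A hedged sketch of the mapping (helper name hypothetical):

#include <cstddef>

// Hypothetical mirror of the encoding: shift = log2(component size in bytes).
constexpr size_t ComponentSizeShiftFor(size_t component_size) {
  return component_size == 1 ? 0
       : component_size == 2 ? 1
       : component_size == 4 ? 2
       : 3;  // 8-byte components: long[], double[].
}

static_assert(ComponentSizeShiftFor(1) == 0, "byte[]/boolean[]");
static_assert(ComponentSizeShiftFor(2) == 1, "char[]/short[]");
static_assert(ComponentSizeShiftFor(4) == 2, "int[]/float[]/compressed references");
static_assert(ComponentSizeShiftFor(8) == 3, "long[]/double[]");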
class HAdd final : public HBinaryOperation {
@@ -5656,6 +5704,10 @@ class HTypeConversion final : public HExpression<1> {
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
+ // Return whether the conversion is implicit. This includes conversion to the same type.
+ bool IsImplicitConversion() const {
+ return DataType::IsTypeConversionImplicit(GetInputType(), GetResultType());
+ }
// Try to statically evaluate the conversion and return a HConstant
// containing the result. If the input cannot be converted, return nullptr.
@@ -5862,7 +5914,7 @@ class HArrayGet final : public HExpression<2> {
type,
SideEffects::ArrayReadOfType(type),
dex_pc,
- /* is_string_char_at */ false) {
+ /* is_string_char_at= */ false) {
}
HArrayGet(HInstruction* array,
@@ -6136,6 +6188,9 @@ class HBoundsCheck final : public HExpression<2> {
private:
static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
+ static constexpr size_t kNumberOfBoundsCheckPackedBits = kFlagIsStringCharAt + 1;
+ static_assert(kNumberOfBoundsCheckPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+ "Too many packed fields.");
};
class HSuspendCheck final : public HExpression<0> {
@@ -6301,7 +6356,7 @@ class HLoadClass final : public HInstruction {
ReferenceTypeInfo GetLoadedClassRTI() {
if (GetPackedFlag<kFlagValidLoadedClassRTI>()) {
// Note: The is_exact flag from the return value should not be used.
- return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+ return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
} else {
return ReferenceTypeInfo::CreateInvalid();
}
@@ -7054,7 +7109,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction {
side_effects,
dex_pc,
allocator,
- /* number_of_inputs */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
+ /* number_of_inputs= */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
kArenaAllocTypeCheckInputs),
klass_(klass) {
SetPackedField<TypeCheckKindField>(check_kind);
@@ -7110,7 +7165,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction {
ReferenceTypeInfo GetTargetClassRTI() {
if (GetPackedFlag<kFlagValidTargetClassRTI>()) {
// Note: The is_exact flag from the return value should not be used.
- return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+ return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
} else {
return ReferenceTypeInfo::CreateInvalid();
}
@@ -7377,7 +7432,7 @@ class HMemoryBarrier final : public HExpression<0> {
// }
//
// See also:
-// * CompilerDriver::RequiresConstructorBarrier
+// * DexCompilationUnit::RequiresConstructorBarrier
// * QuasiAtomic::ThreadFenceForConstructor
//
class HConstructorFence final : public HVariableInputSizeInstruction {
@@ -7423,7 +7478,7 @@ class HConstructorFence final : public HVariableInputSizeInstruction {
SideEffects::AllReads(),
dex_pc,
allocator,
- /* number_of_inputs */ 1,
+ /* number_of_inputs= */ 1,
kArenaAllocConstructorFenceInputs) {
DCHECK(fence_object != nullptr);
SetRawInputAt(0, fence_object);
@@ -7741,7 +7796,7 @@ class HIntermediateAddress final : public HExpression<2> {
#ifdef ART_ENABLE_CODEGEN_mips
#include "nodes_mips.h"
#endif
-#ifdef ART_ENABLE_CODEGEN_x86
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
#include "nodes_x86.h"
#endif
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index c7539f2846..efe4d6b000 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -207,7 +207,7 @@ class HVecUnaryOperation : public HVecOperation {
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 1,
+ /* number_of_inputs= */ 1,
vector_length,
dex_pc) {
SetRawInputAt(0, input);
@@ -235,7 +235,7 @@ class HVecBinaryOperation : public HVecOperation {
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 2,
+ /* number_of_inputs= */ 2,
vector_length,
dex_pc) {
SetRawInputAt(0, left);
@@ -384,21 +384,21 @@ class HVecReduce final : public HVecUnaryOperation {
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
- ReductionKind kind,
+ ReductionKind reduction_kind,
uint32_t dex_pc)
: HVecUnaryOperation(kVecReduce, allocator, input, packed_type, vector_length, dex_pc),
- kind_(kind) {
+ reduction_kind_(reduction_kind) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
- ReductionKind GetKind() const { return kind_; }
+ ReductionKind GetReductionKind() const { return reduction_kind_; }
bool CanBeMoved() const override { return true; }
bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecReduce());
const HVecReduce* o = other->AsVecReduce();
- return HVecOperation::InstructionDataEquals(o) && GetKind() == o->GetKind();
+ return HVecOperation::InstructionDataEquals(o) && GetReductionKind() == o->GetReductionKind();
}
DECLARE_INSTRUCTION(VecReduce);
@@ -407,7 +407,7 @@ class HVecReduce final : public HVecUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(VecReduce);
private:
- const ReductionKind kind_;
+ const ReductionKind reduction_kind_;
};
// Converts every component in the vector,
@@ -948,7 +948,7 @@ class HVecMultiplyAccumulate final : public HVecOperation {
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc),
op_kind_(op) {
@@ -1002,7 +1002,7 @@ class HVecSADAccumulate final : public HVecOperation {
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc) {
DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
@@ -1021,6 +1021,66 @@ class HVecSADAccumulate final : public HVecOperation {
DEFAULT_COPY_CONSTRUCTOR(VecSADAccumulate);
};
+// Performs a dot product of two vectors and adds the result to wider-precision components of
+// the accumulator.
+//
+// viz. DOT_PRODUCT([ a1, .. , am ], [ x1, .. , xn ], [ y1, .. , yn ]) =
+// [ a1 + sum(xi * yi), .. , am + sum(xj * yj) ],
+// for m <= n, non-overlapping sums,
+// for either both signed or both unsigned operands x, y.
+//
+// Notes:
+// - packed type reflects the type of sum reduction, not the type of the operands.
+// - IsZeroExtending() is used to determine the kind of signed/zero extension to be
+// performed for the operands.
+//
+// TODO: Support types other than kInt32 for packed type.
+class HVecDotProd final : public HVecOperation {
+ public:
+ HVecDotProd(ArenaAllocator* allocator,
+ HInstruction* accumulator,
+ HInstruction* left,
+ HInstruction* right,
+ DataType::Type packed_type,
+ bool is_zero_extending,
+ size_t vector_length,
+ uint32_t dex_pc)
+ : HVecOperation(kVecDotProd,
+ allocator,
+ packed_type,
+ SideEffects::None(),
+ /* number_of_inputs= */ 3,
+ vector_length,
+ dex_pc) {
+ DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
+ DCHECK(DataType::IsIntegralType(packed_type));
+ DCHECK(left->IsVecOperation());
+ DCHECK(right->IsVecOperation());
+ DCHECK_EQ(ToSignedType(left->AsVecOperation()->GetPackedType()),
+ ToSignedType(right->AsVecOperation()->GetPackedType()));
+ SetRawInputAt(0, accumulator);
+ SetRawInputAt(1, left);
+ SetRawInputAt(2, right);
+ SetPackedFlag<kFieldHDotProdIsZeroExtending>(is_zero_extending);
+ }
+
+ bool IsZeroExtending() const { return GetPackedFlag<kFieldHDotProdIsZeroExtending>(); }
+
+ bool CanBeMoved() const override { return true; }
+
+ DECLARE_INSTRUCTION(VecDotProd);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecDotProd);
+
+ private:
+ // Additional packed bits.
+ static constexpr size_t kFieldHDotProdIsZeroExtending =
+ HVecOperation::kNumberOfVectorOpPackedBits;
+ static constexpr size_t kNumberOfHDotProdPackedBits = kFieldHDotProdIsZeroExtending + 1;
+ static_assert(kNumberOfHDotProdPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+};
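A scalar model of the semantics documented above, assuming the common SDOT-style shape where four signed 8-bit subelements feed one 32-bit accumulator lane (the node itself generalizes over operand types and extension kind):

#include <cstddef>
#include <cstdint>

// One kInt32 accumulator lane: a1 + sum(xi * yi) from the comment above.
int32_t DotProdLane(int32_t acc, const int8_t x[4], const int8_t y[4]) {
  int32_t sum = 0;
  for (size_t i = 0; i < 4; ++i) {
    sum += static_cast<int32_t>(x[i]) * static_cast<int32_t>(y[i]);
  }
  return acc + sum;
}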
+
// Loads a vector from memory, viz. load(mem, 1)
// yield the vector [ mem(1), .. , mem(n) ].
class HVecLoad final : public HVecMemoryOperation {
@@ -1037,7 +1097,7 @@ class HVecLoad final : public HVecMemoryOperation {
allocator,
packed_type,
side_effects,
- /* number_of_inputs */ 2,
+ /* number_of_inputs= */ 2,
vector_length,
dex_pc) {
SetRawInputAt(0, base);
@@ -1083,7 +1143,7 @@ class HVecStore final : public HVecMemoryOperation {
allocator,
packed_type,
side_effects,
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc) {
DCHECK(HasConsistentPackedTypes(value, packed_type));
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index af13449646..b0a665d704 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -401,9 +401,9 @@ TEST_F(NodesVectorTest, VectorKindMattersOnReduce) {
EXPECT_TRUE(v2->CanBeMoved());
EXPECT_TRUE(v3->CanBeMoved());
- EXPECT_EQ(HVecReduce::kSum, v1->GetKind());
- EXPECT_EQ(HVecReduce::kMin, v2->GetKind());
- EXPECT_EQ(HVecReduce::kMax, v3->GetKind());
+ EXPECT_EQ(HVecReduce::kSum, v1->GetReductionKind());
+ EXPECT_EQ(HVecReduce::kMin, v2->GetReductionKind());
+ EXPECT_EQ(HVecReduce::kMax, v3->GetReductionKind());
EXPECT_TRUE(v1->Equals(v1));
EXPECT_TRUE(v2->Equals(v2));
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index a55110426b..8e8fbc1581 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -128,6 +128,92 @@ class HX86PackedSwitch final : public HExpression<2> {
const int32_t num_entries_;
};
+class HX86AndNot final : public HBinaryOperation {
+ public:
+ HX86AndNot(DataType::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(kX86AndNot, result_type, left, right, SideEffects::None(), dex_pc) {
+ }
+
+ bool IsCommutative() const override { return false; }
+
+ template <typename T> static T Compute(T x, T y) { return ~x & y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
+
+ DECLARE_INSTRUCTION(X86AndNot);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86AndNot);
+};
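For reference, the value semantics mirrored by Compute above match the BMI1 ANDN instruction; a small sketch:

#include <cstdint>

// HX86AndNot(left, right) evaluates ~left & right.
constexpr uint32_t AndNot(uint32_t x, uint32_t y) { return ~x & y; }

static_assert(AndNot(0b1100u, 0b1010u) == 0b0010u,
              "Bits set in x are cleared in y; the rest of y passes through.");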
+
+class HX86MaskOrResetLeastSetBit final : public HUnaryOperation {
+ public:
+  HX86MaskOrResetLeastSetBit(DataType::Type result_type,
+                             InstructionKind op,
+                             HInstruction* input,
+                             uint32_t dex_pc = kNoDexPc)
+ : HUnaryOperation(kX86MaskOrResetLeastSetBit, result_type, input, dex_pc),
+ op_kind_(op) {
+ DCHECK_EQ(result_type, DataType::Kind(input->GetType()));
+ DCHECK(op == HInstruction::kAnd || op == HInstruction::kXor) << op;
+ }
+ template <typename T>
+  auto Compute(T x) const -> decltype(x & (x - 1)) {
+    static_assert(std::is_same<decltype(x & (x - 1)), decltype(x ^ (x - 1))>::value,
+                  "Inconsistent bitwise types");
+    switch (op_kind_) {
+      case HInstruction::kAnd:
+        return x & (x - 1);
+      case HInstruction::kXor:
+        return x ^ (x - 1);
+      default:
+        LOG(FATAL) << "Unreachable";
+        UNREACHABLE();
+    }
+  }
+
+ HConstant* Evaluate(HIntConstant* x) const override {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x) const override {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
+ LOG(FATAL) << DebugName() << "is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
+ LOG(FATAL) << DebugName() << "is not defined for double values";
+ UNREACHABLE();
+ }
+ InstructionKind GetOpKind() const { return op_kind_; }
+
+ DECLARE_INSTRUCTION(X86MaskOrResetLeastSetBit);
+
+ protected:
+ const InstructionKind op_kind_;
+
+ DEFAULT_COPY_CONSTRUCTOR(X86MaskOrResetLeastSetBit);
+};
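The two op kinds correspond to BMI1 BLSR (kAnd: reset the lowest set bit) and BLSMSK (kXor: mask up to and including the lowest set bit). A scalar sketch of each:

#include <cstdint>

constexpr uint32_t ResetLeastSetBit(uint32_t x) { return x & (x - 1); }     // kAnd
constexpr uint32_t MaskUpToLeastSetBit(uint32_t x) { return x ^ (x - 1); }  // kXor

static_assert(ResetLeastSetBit(0b0110u) == 0b0100u, "lowest set bit cleared");
static_assert(MaskUpToLeastSetBit(0b0110u) == 0b0011u,
              "ones through the lowest set bit, zeros above");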
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_X86_H_
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 142ddb5fbb..8864a12301 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -28,10 +28,14 @@
#endif
#ifdef ART_ENABLE_CODEGEN_x86
#include "pc_relative_fixups_x86.h"
+#include "instruction_simplifier_x86.h"
#endif
#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
#include "x86_memory_gen.h"
#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+#include "instruction_simplifier_x86_64.h"
+#endif
#include "bounds_check_elimination.h"
#include "cha_guard_optimization.h"
@@ -84,14 +88,10 @@ const char* OptimizationPassName(OptimizationPass pass) {
return HDeadCodeElimination::kDeadCodeEliminationPassName;
case OptimizationPass::kInliner:
return HInliner::kInlinerPassName;
- case OptimizationPass::kSharpening:
- return HSharpening::kSharpeningPassName;
case OptimizationPass::kSelectGenerator:
return HSelectGenerator::kSelectGeneratorPassName;
case OptimizationPass::kInstructionSimplifier:
return InstructionSimplifier::kInstructionSimplifierPassName;
- case OptimizationPass::kIntrinsicsRecognizer:
- return IntrinsicsRecognizer::kIntrinsicsRecognizerPassName;
case OptimizationPass::kCHAGuardOptimization:
return CHAGuardOptimization::kCHAGuardOptimizationPassName;
case OptimizationPass::kCodeSinking:
@@ -117,6 +117,12 @@ const char* OptimizationPassName(OptimizationPass pass) {
#ifdef ART_ENABLE_CODEGEN_x86
case OptimizationPass::kPcRelativeFixupsX86:
return x86::PcRelativeFixups::kPcRelativeFixupsX86PassName;
+ case OptimizationPass::kInstructionSimplifierX86:
+ return x86::InstructionSimplifierX86::kInstructionSimplifierX86PassName;
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+ case OptimizationPass::kInstructionSimplifierX86_64:
+ return x86_64::InstructionSimplifierX86_64::kInstructionSimplifierX86_64PassName;
#endif
#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
case OptimizationPass::kX86MemoryOperandGeneration:
@@ -141,14 +147,12 @@ OptimizationPass OptimizationPassByName(const std::string& pass_name) {
X(OptimizationPass::kInductionVarAnalysis);
X(OptimizationPass::kInliner);
X(OptimizationPass::kInstructionSimplifier);
- X(OptimizationPass::kIntrinsicsRecognizer);
X(OptimizationPass::kInvariantCodeMotion);
X(OptimizationPass::kLoadStoreAnalysis);
X(OptimizationPass::kLoadStoreElimination);
X(OptimizationPass::kLoopOptimization);
X(OptimizationPass::kScheduling);
X(OptimizationPass::kSelectGenerator);
- X(OptimizationPass::kSharpening);
X(OptimizationPass::kSideEffectsAnalysis);
#ifdef ART_ENABLE_CODEGEN_arm
X(OptimizationPass::kInstructionSimplifierArm);
@@ -177,7 +181,6 @@ ArenaVector<HOptimization*> ConstructOptimizations(
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
- CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles) {
ArenaVector<HOptimization*> optimizations(allocator->Adapter());
@@ -254,28 +257,21 @@ ArenaVector<HOptimization*> ConstructOptimizations(
codegen,
dex_compilation_unit, // outer_compilation_unit
dex_compilation_unit, // outermost_compilation_unit
- driver,
handles,
stats,
accessor.RegistersSize(),
- /* total_number_of_instructions */ 0,
- /* parent */ nullptr,
- /* depth */ 0,
+ /* total_number_of_instructions= */ 0,
+ /* parent= */ nullptr,
+ /* depth= */ 0,
pass_name);
break;
}
- case OptimizationPass::kSharpening:
- opt = new (allocator) HSharpening(graph, codegen, pass_name);
- break;
case OptimizationPass::kSelectGenerator:
opt = new (allocator) HSelectGenerator(graph, handles, stats, pass_name);
break;
case OptimizationPass::kInstructionSimplifier:
opt = new (allocator) InstructionSimplifier(graph, codegen, stats, pass_name);
break;
- case OptimizationPass::kIntrinsicsRecognizer:
- opt = new (allocator) IntrinsicsRecognizer(graph, stats, pass_name);
- break;
case OptimizationPass::kCHAGuardOptimization:
opt = new (allocator) CHAGuardOptimization(graph, pass_name);
break;
@@ -323,6 +319,14 @@ ArenaVector<HOptimization*> ConstructOptimizations(
DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
opt = new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
break;
+ case OptimizationPass::kInstructionSimplifierX86:
+ opt = new (allocator) x86::InstructionSimplifierX86(graph, codegen, stats);
+ break;
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+ case OptimizationPass::kInstructionSimplifierX86_64:
+ opt = new (allocator) x86_64::InstructionSimplifierX86_64(graph, codegen, stats);
+ break;
#endif
case OptimizationPass::kNone:
LOG(FATAL) << "kNone does not represent an actual pass";
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 88b283cebf..ce44b5f81a 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -77,14 +77,12 @@ enum class OptimizationPass {
kInductionVarAnalysis,
kInliner,
kInstructionSimplifier,
- kIntrinsicsRecognizer,
kInvariantCodeMotion,
kLoadStoreAnalysis,
kLoadStoreElimination,
kLoopOptimization,
kScheduling,
kSelectGenerator,
- kSharpening,
kSideEffectsAnalysis,
#ifdef ART_ENABLE_CODEGEN_arm
kInstructionSimplifierArm,
@@ -98,6 +96,10 @@ enum class OptimizationPass {
#endif
#ifdef ART_ENABLE_CODEGEN_x86
kPcRelativeFixupsX86,
+ kInstructionSimplifierX86,
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+ kInstructionSimplifierX86_64,
#endif
#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
kX86MemoryOperandGeneration,
@@ -145,7 +147,6 @@ ArenaVector<HOptimization*> ConstructOptimizations(
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
- CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles);
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index be1f7ea5b4..a52031cced 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -128,7 +128,7 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
public:
InternalCodeAllocator() {}
- virtual uint8_t* Allocate(size_t size) {
+ uint8_t* Allocate(size_t size) override {
memory_.resize(size);
return memory_.data();
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0a747053cf..e8f8d32525 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -26,6 +26,7 @@
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/scoped_arena_allocator.h"
@@ -79,7 +80,7 @@ class CodeVectorAllocator final : public CodeAllocator {
explicit CodeVectorAllocator(ArenaAllocator* allocator)
: memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
- virtual uint8_t* Allocate(size_t size) {
+ uint8_t* Allocate(size_t size) override {
memory_.resize(size);
return &memory_[0];
}
@@ -161,7 +162,7 @@ class PassObserver : public ValueObject {
VLOG(compiler) << "Starting pass: " << pass_name;
// Dump graph first, then start timer.
if (visualizer_enabled_) {
- visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
+ visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
FlushVisualizer();
}
if (timing_logger_enabled_) {
@@ -183,7 +184,7 @@ class PassObserver : public ValueObject {
timing_logger_.EndTiming();
}
if (visualizer_enabled_) {
- visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
+ visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
FlushVisualizer();
}
@@ -271,7 +272,7 @@ class OptimizingCompiler final : public Compiler {
bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
- CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+ CompiledMethod* Compile(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -298,6 +299,7 @@ class OptimizingCompiler final : public Compiler {
bool JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger)
override
@@ -319,7 +321,6 @@ class OptimizingCompiler final : public Compiler {
graph,
compilation_stats_.get(),
codegen,
- GetCompilerDriver(),
dex_compilation_unit,
handles);
DCHECK_EQ(length, optimizations.size());
@@ -369,7 +370,7 @@ class OptimizingCompiler final : public Compiler {
CompiledMethod* Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
- const DexFile::CodeItem* item) const;
+ const dex::CodeItem* item) const;
// Try compiling a method and return the code generator used for
// compiling it.
@@ -383,6 +384,7 @@ class OptimizingCompiler final : public Compiler {
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const;
@@ -399,7 +401,14 @@ class OptimizingCompiler final : public Compiler {
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const;
- void GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo method_debug_info)
+ bool RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const;
+
+ void GenerateJitDebugInfo(ArtMethod* method,
+ const debug::MethodDebugInfo& method_debug_info)
REQUIRES_SHARED(Locks::mutator_lock_);
std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
@@ -456,6 +465,48 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
|| instruction_set == InstructionSet::kX86_64;
}
+bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const {
+ switch (codegen->GetCompilerOptions().GetInstructionSet()) {
+#ifdef ART_ENABLE_CODEGEN_mips
+ case InstructionSet::kMips: {
+ OptimizationDef mips_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsMips)
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ mips_optimizations);
+ }
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case InstructionSet::kX86: {
+ OptimizationDef x86_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsX86),
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ x86_optimizations);
+ }
+#endif
+ default:
+ UNUSED(graph);
+ UNUSED(codegen);
+ UNUSED(dex_compilation_unit);
+ UNUSED(pass_observer);
+ UNUSED(handles);
+ return false;
+ }
+}
+
bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
CodeGenerator* codegen,
const DexCompilationUnit& dex_compilation_unit,
@@ -528,6 +579,7 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86: {
OptimizationDef x86_optimizations[] = {
+ OptDef(OptimizationPass::kInstructionSimplifierX86),
OptDef(OptimizationPass::kSideEffectsAnalysis),
OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
OptDef(OptimizationPass::kPcRelativeFixupsX86),
@@ -544,6 +596,7 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
#ifdef ART_ENABLE_CODEGEN_x86_64
case InstructionSet::kX86_64: {
OptimizationDef x86_64_optimizations[] = {
+ OptDef(OptimizationPass::kInstructionSimplifierX86_64),
OptDef(OptimizationPass::kSideEffectsAnalysis),
OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
OptDef(OptimizationPass::kX86MemoryOperandGeneration)
@@ -623,8 +676,6 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
OptimizationDef optimizations[] = {
// Initial optimizations.
- OptDef(OptimizationPass::kIntrinsicsRecognizer),
- OptDef(OptimizationPass::kSharpening),
OptDef(OptimizationPass::kConstantFolding),
OptDef(OptimizationPass::kInstructionSimplifier),
OptDef(OptimizationPass::kDeadCodeElimination,
@@ -709,12 +760,12 @@ static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator*
CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
- const DexFile::CodeItem* code_item_for_osr_check) const {
+ const dex::CodeItem* code_item_for_osr_check) const {
ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
- GetCompilerDriver(),
+ GetCompilerDriver()->GetCompiledMethodStorage(),
codegen->GetInstructionSet(),
code_allocator->GetMemory(),
ArrayRef<const uint8_t>(stack_map),
@@ -739,6 +790,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
@@ -747,7 +799,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
InstructionSet instruction_set = compiler_options.GetInstructionSet();
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
- const DexFile::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
+ const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
// Always use the Thumb-2 assembler: some runtime functionality
// (like implicit stack overflow checks) assume Thumb-2.
@@ -776,6 +828,29 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);
+
+ bool dead_reference_safe;
+ ArrayRef<const uint8_t> interpreter_metadata;
+ // For AOT compilation, we may not get a method, for example if its class is erroneous,
+ // possibly due to an unavailable superclass. JIT should always have a method.
+ DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
+ if (method != nullptr) {
+ const dex::ClassDef* containing_class;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ containing_class = &method->GetClassDef();
+ interpreter_metadata = method->GetQuickenedInfo();
+ }
+    // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
+    // is rarely true, so the conjunction below usually short-circuits before the slow check.
+ dead_reference_safe =
+ annotations::HasDeadReferenceSafeAnnotation(dex_file, *containing_class)
+ && !annotations::MethodContainsRSensitiveAccess(dex_file, *containing_class, method_idx);
+ } else {
+ // If we could not resolve the class, conservatively assume it's dead-reference unsafe.
+ dead_reference_safe = false;
+ }
+
HGraph* graph = new (allocator) HGraph(
allocator,
arena_stack,
@@ -783,17 +858,12 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
method_idx,
compiler_options.GetInstructionSet(),
kInvalidInvokeType,
+ dead_reference_safe,
compiler_driver->GetCompilerOptions().GetDebuggable(),
- osr);
+ /* osr= */ osr);
- ArrayRef<const uint8_t> interpreter_metadata;
- // For AOT compilation, we may not get a method, for example if its class is erroneous.
- // JIT should always have a method.
- DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
if (method != nullptr) {
graph->SetArtMethod(method);
- ScopedObjectAccess soa(Thread::Current());
- interpreter_metadata = method->GetQuickenedInfo();
}
std::unique_ptr<CodeGenerator> codegen(
@@ -820,7 +890,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
code_item_accessor,
&dex_compilation_unit,
&dex_compilation_unit,
- compiler_driver,
codegen.get(),
compilation_stats_.get(),
interpreter_metadata,
@@ -848,6 +917,11 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
break;
}
+ case kAnalysisFailIrreducibleLoopAndStringInit: {
+ MaybeRecordStat(compilation_stats_.get(),
+ MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
+ break;
+ }
case kAnalysisSuccess:
UNREACHABLE();
}
@@ -856,11 +930,11 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
}
- RunOptimizations(graph,
- codegen.get(),
- dex_compilation_unit,
- &pass_observer,
- handles);
+ if (baseline) {
+ RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ } else {
+ RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ }
RegisterAllocator::Strategy regalloc_strategy =
compiler_options.GetRegisterAllocationStrategy();
@@ -905,10 +979,11 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
arena_stack,
dex_file,
method_idx,
- compiler_driver->GetCompilerOptions().GetInstructionSet(),
+ compiler_options.GetInstructionSet(),
kInvalidInvokeType,
- compiler_driver->GetCompilerOptions().GetDebuggable(),
- /* osr */ false);
+ /* dead_reference_safe= */ true, // Intrinsics don't affect dead reference safety.
+ compiler_options.GetDebuggable(),
+ /* osr= */ false);
DCHECK(Runtime::Current()->IsAotCompiler());
DCHECK(method != nullptr);
@@ -936,18 +1011,16 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
CodeItemDebugInfoAccessor(), // Null code item.
&dex_compilation_unit,
&dex_compilation_unit,
- compiler_driver,
codegen.get(),
compilation_stats_.get(),
- /* interpreter_metadata */ ArrayRef<const uint8_t>(),
+ /* interpreter_metadata= */ ArrayRef<const uint8_t>(),
handles);
builder.BuildIntrinsicGraph(method);
}
OptimizationDef optimizations[] = {
- OptDef(OptimizationPass::kIntrinsicsRecognizer),
- // Some intrinsics are converted to HIR by the simplifier and the codegen also
- // has a few assumptions that only the instruction simplifier can satisfy.
+ // The codegen has a few assumptions that only the instruction simplifier
+ // can satisfy.
OptDef(OptimizationPass::kInstructionSimplifier),
};
RunOptimizations(graph,
@@ -979,7 +1052,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
return codegen.release();
}
-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -988,12 +1061,13 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
+ const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
CompiledMethod* compiled_method = nullptr;
Runtime* runtime = Runtime::Current();
DCHECK(runtime->IsAotCompiler());
- const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
+ const VerifiedMethod* verified_method = compiler_options.GetVerifiedMethod(&dex_file, method_idx);
DCHECK(!verified_method->HasRuntimeThrow());
- if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
+ if (compiler_options.IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
verifier::CanCompilerHandleVerificationFailure(
verified_method->GetEncounteredVerificationFailures())) {
ArenaAllocator allocator(runtime->GetArenaPool());
@@ -1002,6 +1076,15 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
std::unique_ptr<CodeGenerator> codegen;
bool compiled_intrinsic = false;
{
+ ScopedObjectAccess soa(Thread::Current());
+ ArtMethod* method =
+ runtime->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+ method_idx, dex_cache, jclass_loader, /*referrer=*/ nullptr, invoke_type);
+ DCHECK_EQ(method == nullptr, soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException(); // Suppress exception if any.
+ VariableSizedHandleScope handles(soa.Self());
+ Handle<mirror::Class> compiling_class =
+ handles.NewHandle(method != nullptr ? method->GetDeclaringClass() : nullptr);
DexCompilationUnit dex_compilation_unit(
jclass_loader,
runtime->GetClassLinker(),
@@ -1010,16 +1093,13 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
class_def_idx,
method_idx,
access_flags,
- /* verified_method */ nullptr, // Not needed by the Optimizing compiler.
- dex_cache);
- ScopedObjectAccess soa(Thread::Current());
- ArtMethod* method = compiler_driver->ResolveMethod(
- soa, dex_cache, jclass_loader, &dex_compilation_unit, method_idx, invoke_type);
- VariableSizedHandleScope handles(soa.Self());
+ /*verified_method=*/ nullptr, // Not needed by the Optimizing compiler.
+ dex_cache,
+ compiling_class);
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
- DCHECK(compiler_driver->GetCompilerOptions().IsBootImage());
+ DCHECK(compiler_options.IsBootImage());
codegen.reset(
TryCompileIntrinsic(&allocator,
&arena_stack,
@@ -1038,7 +1118,8 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
&code_allocator,
dex_compilation_unit,
method,
- /* osr */ false,
+ compiler_options.IsBaseline(),
+ /* osr= */ false,
&handles));
}
}
@@ -1066,7 +1147,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
} else {
MethodCompilationStat method_stat;
- if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
+ if (compiler_options.VerifyAtRuntime()) {
method_stat = MethodCompilationStat::kNotCompiledVerifyAtRuntime;
} else {
method_stat = MethodCompilationStat::kNotCompiledVerificationError;
@@ -1075,8 +1156,8 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
if (kIsDebugBuild &&
- IsCompilingWithCoreImage() &&
- IsInstructionSetSupported(compiler_driver->GetCompilerOptions().GetInstructionSet())) {
+ compiler_options.CompilingWithCoreImage() &&
+ IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
// For testing purposes, we put a special marker on method names
// that should be compiled with this compiler (when the
// instruction set is supported). This makes sure we're not
@@ -1099,7 +1180,7 @@ static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* alloca
jni_compiled_method.GetFrameSize(),
jni_compiled_method.GetCoreSpillMask(),
jni_compiled_method.GetFpSpillMask(),
- /* num_dex_registers */ 0);
+ /* num_dex_registers= */ 0);
stack_map_stream->EndMethod();
return stack_map_stream->Encode();
}
@@ -1116,21 +1197,23 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
if (compiler_options.IsBootImage()) {
ScopedObjectAccess soa(Thread::Current());
ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
- method_idx, dex_cache.Get(), /* class_loader */ nullptr);
+ method_idx, dex_cache.Get(), /*class_loader=*/ nullptr);
if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
+ VariableSizedHandleScope handles(soa.Self());
ScopedNullHandle<mirror::ClassLoader> class_loader; // null means boot class path loader.
+ Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
DexCompilationUnit dex_compilation_unit(
class_loader,
runtime->GetClassLinker(),
dex_file,
- /* code_item */ nullptr,
- /* class_def_idx */ DexFile::kDexNoIndex16,
+ /*code_item=*/ nullptr,
+ /*class_def_idx=*/ DexFile::kDexNoIndex16,
method_idx,
access_flags,
- /* verified_method */ nullptr,
- dex_cache);
+ /*verified_method=*/ nullptr,
+ dex_cache,
+ compiling_class);
CodeVectorAllocator code_allocator(&allocator);
- VariableSizedHandleScope handles(soa.Self());
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
std::unique_ptr<CodeGenerator> codegen(
@@ -1144,7 +1227,7 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
CompiledMethod* compiled_method = Emit(&allocator,
&code_allocator,
codegen.get(),
- /* code_item_for_osr_check */ nullptr);
+ /* item= */ nullptr);
compiled_method->MarkAsIntrinsic();
return compiled_method;
}
@@ -1159,45 +1242,27 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
jni_compiled_method);
return CompiledMethod::SwapAllocCompiledMethod(
- GetCompilerDriver(),
+ GetCompilerDriver()->GetCompiledMethodStorage(),
jni_compiled_method.GetInstructionSet(),
jni_compiled_method.GetCode(),
ArrayRef<const uint8_t>(stack_map),
jni_compiled_method.GetCfi(),
- /* patches */ ArrayRef<const linker::LinkerPatch>());
+ /* patches= */ ArrayRef<const linker::LinkerPatch>());
}
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
return new OptimizingCompiler(driver);
}
-bool IsCompilingWithCoreImage() {
- const std::string& image = Runtime::Current()->GetImageLocation();
- return CompilerDriver::IsCoreImageFilename(image);
-}
-
bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
// Note: the runtime is null only for unit testing.
return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
}
-bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee) {
- if (!Runtime::Current()->IsAotCompiler()) {
- // JIT can always encode methods in stack maps.
- return true;
- }
- if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
- return true;
- }
- // TODO(ngeoffray): Support more AOT cases for inlining:
- // - methods in multidex
- // - methods in boot image for on-device non-PIC compilation.
- return false;
-}
-
bool OptimizingCompiler::JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger) {
StackHandleScope<3> hs(self);
@@ -1208,7 +1273,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
const DexFile* dex_file = method->GetDexFile();
const uint16_t class_def_idx = method->GetClassDefIndex();
- const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+ const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
const uint32_t method_idx = method->GetDexMethodIndex();
const uint32_t access_flags = method->GetAccessFlags();
@@ -1219,7 +1284,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
compiler_options, access_flags, method_idx, *dex_file);
- ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
+ std::vector<Handle<mirror::Object>> roots;
ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
allocator.Adapter(kArenaAllocCHA));
ArenaStack arena_stack(runtime->GetJitArenaPool());
@@ -1231,7 +1296,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
stack_map.size(),
- /* number_of_roots */ 0,
+ /* number_of_roots= */ 0,
method,
&stack_map_data,
&roots_data);
@@ -1251,7 +1316,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
data_size,
osr,
roots,
- /* has_should_deoptimize_flag */ false,
+ /* has_should_deoptimize_flag= */ false,
cha_single_implementation_list);
if (code == nullptr) {
return false;
@@ -1293,6 +1358,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
std::unique_ptr<CodeGenerator> codegen;
{
+ Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
DexCompilationUnit dex_compilation_unit(
class_loader,
runtime->GetClassLinker(),
@@ -1301,8 +1367,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
class_def_idx,
method_idx,
access_flags,
- /* verified_method */ nullptr,
- dex_cache);
+ /*verified_method=*/ nullptr,
+ dex_cache,
+ compiling_class);
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(self, kNative);
@@ -1312,6 +1379,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
&code_allocator,
dex_compilation_unit,
method,
+ baseline,
osr,
&handles));
if (codegen.get() == nullptr) {
@@ -1321,19 +1389,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
size_t number_of_roots = codegen->GetNumberOfJitRoots();
- // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
- // will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is
- // executed, this array is not needed.
- Handle<mirror::ObjectArray<mirror::Object>> roots(
- hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
- self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(), number_of_roots)));
- if (roots == nullptr) {
- // Out of memory, just clear the exception to avoid any Java exception uncaught problems.
- MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
- DCHECK(self->IsExceptionPending());
- self->ClearException();
- return false;
- }
uint8_t* stack_map_data = nullptr;
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
@@ -1347,7 +1402,14 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return false;
}
memcpy(stack_map_data, stack_map.data(), stack_map.size());
- codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
+ std::vector<Handle<mirror::Object>> roots;
+ codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
+ // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
+ DCHECK(std::all_of(roots.begin(),
+ roots.end(),
+                     [&handles](Handle<mirror::Object> root) {
+ return handles.Contains(root.GetReference());
+ }));
const void* code = code_cache->CommitCode(
self,
@@ -1413,26 +1475,31 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return true;
}
-void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo info) {
+void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method ATTRIBUTE_UNUSED,
+ const debug::MethodDebugInfo& info) {
const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
DCHECK(compiler_options.GenerateAnyDebugInfo());
-
- // If both flags are passed, generate full debug info.
- const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
-
- // Create entry for the single method that we just compiled.
- std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
- compiler_options.GetInstructionSet(),
- compiler_options.GetInstructionSetFeatures(),
- mini_debug_info,
- ArrayRef<const debug::MethodDebugInfo>(&info, 1));
- MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
- AddNativeDebugInfoForJit(reinterpret_cast<const void*>(info.code_address), elf_file);
-
- VLOG(jit)
- << "JIT mini-debug-info added for " << ArtMethod::PrettyMethod(method)
- << " size=" << PrettySize(elf_file.size())
- << " total_size=" << PrettySize(GetJitNativeDebugInfoMemUsage());
+ TimingLogger logger("Generate JIT debug info logger", true, VLOG_IS_ON(jit));
+ {
+ TimingLogger::ScopedTiming st("Generate JIT debug info", &logger);
+
+ // If both flags are passed, generate full debug info.
+ const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
+
+ // Create entry for the single method that we just compiled.
+ std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
+ compiler_options.GetInstructionSet(),
+ compiler_options.GetInstructionSetFeatures(),
+ mini_debug_info,
+ info);
+ AddNativeDebugInfoForJit(Thread::Current(),
+ reinterpret_cast<const void*>(info.code_address),
+ elf_file,
+ debug::PackElfFileForJIT,
+ compiler_options.GetInstructionSet(),
+ compiler_options.GetInstructionSetFeatures());
+ }
+ Runtime::Current()->GetJit()->AddTimingLogger(logger);
}
} // namespace art
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
index 6ee9c70fdb..f5279e83eb 100644
--- a/compiler/optimizing/optimizing_compiler.h
+++ b/compiler/optimizing/optimizing_compiler.h
@@ -29,14 +29,7 @@ class DexFile;
Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
-// Returns whether we are compiling against a "core" image, which
-// is an indicative we are running tests. The compiler will use that
-// information for checking invariants.
-bool IsCompilingWithCoreImage();
-
bool EncodeArtMethodInInlineInfo(ArtMethod* method);
-bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
- REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 9a26f2f6c4..ddd57f5f1a 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -22,9 +22,10 @@
#include <string>
#include <type_traits>
+#include <android-base/logging.h>
+
#include "base/atomic.h"
#include "base/globals.h"
-#include "base/logging.h" // For VLOG_IS_ON.
namespace art {
@@ -59,6 +60,7 @@ enum class MethodCompilationStat {
kNotCompiledUnsupportedIsa,
kNotCompiledVerificationError,
kNotCompiledVerifyAtRuntime,
+ kNotCompiledIrreducibleLoopAndStringInit,
kInlinedMonomorphicCall,
kInlinedPolymorphicCall,
kMonomorphicCall,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index f903f82d50..e5f694109a 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -155,7 +155,7 @@ class OptimizingUnitTestHelper {
void* aligned_data = GetAllocator()->Alloc(code_item_size);
memcpy(aligned_data, &data[0], code_item_size);
CHECK_ALIGNED(aligned_data, StandardDexFile::CodeItem::kAlignment);
- const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(aligned_data);
+ const dex::CodeItem* code_item = reinterpret_cast<const dex::CodeItem*>(aligned_data);
{
ScopedObjectAccess soa(Thread::Current());
@@ -165,13 +165,13 @@ class OptimizingUnitTestHelper {
const DexCompilationUnit* dex_compilation_unit =
new (graph->GetAllocator()) DexCompilationUnit(
handles_->NewHandle<mirror::ClassLoader>(nullptr),
- /* class_linker */ nullptr,
+ /* class_linker= */ nullptr,
graph->GetDexFile(),
code_item,
- /* class_def_index */ DexFile::kDexNoIndex16,
- /* method_idx */ dex::kDexNoIndex,
- /* access_flags */ 0u,
- /* verified_method */ nullptr,
+ /* class_def_index= */ DexFile::kDexNoIndex16,
+ /* method_idx= */ dex::kDexNoIndex,
+ /* access_flags= */ 0u,
+ /* verified_method= */ nullptr,
handles_->NewHandle<mirror::DexCache>(nullptr));
CodeItemDebugInfoAccessor accessor(graph->GetDexFile(), code_item, /*dex_method_idx*/ 0u);
HGraphBuilder builder(graph, dex_compilation_unit, accessor, handles_.get(), return_type);
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 399a6d8cbd..a8ab6cdd0c 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -174,8 +174,8 @@ class ParallelMoveTest : public ::testing::Test {
template<> const bool ParallelMoveTest<TestParallelMoveResolverWithSwap>::has_swap = true;
template<> const bool ParallelMoveTest<TestParallelMoveResolverNoSwap>::has_swap = false;
-typedef ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>
- ParallelMoveResolverTestTypes;
+using ParallelMoveResolverTestTypes =
+ ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>;
TYPED_TEST_CASE(ParallelMoveTest, ParallelMoveResolverTestTypes);
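The typedef-to-using change above is behavior-neutral; the alias form reads left to right and, unlike typedef, extends to templates. A standalone illustration (not from this codebase):

    #include <vector>

    // Equivalent declarations; the alias keeps the new name on the left.
    typedef std::vector<int> IntVecOld;
    using IntVecNew = std::vector<int>;

    // Only 'using' supports alias templates:
    template <typename T>
    using Vec = std::vector<T>;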
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 4b07d5b621..4ff293c46c 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -17,7 +17,6 @@
#include "pc_relative_fixups_x86.h"
#include "code_generator_x86.h"
#include "intrinsics_x86.h"
-#include "runtime.h"
namespace art {
namespace x86 {
@@ -239,7 +238,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
case Intrinsics::kIntegerValueOf:
// This intrinsic can be implemented call-free if it loads the address of the boot image object.
// If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
- if (Runtime::Current()->UseJitCompilation()) {
+ if (!codegen_->GetCompilerOptions().GetCompilePic()) {
break;
}
FALLTHROUGH_INTENDED;
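The hunk above replaces a JIT check with a PIC check: what actually decides whether kIntegerValueOf needs the method-address base is how the boot-image object is reached. A condensed sketch of that decision, with invented names (not the ART API):

    enum class AddressMode { kEmbeddedImmediate, kPcRelativeBase };

    // Illustrative: position-dependent code can bake the absolute address of
    // the cached Integer into the instruction stream; PIC code must load it
    // from .data.bimg.rel.ro through a PC-relative base register.
    AddressMode ChooseIntegerValueOfAddressing(bool compile_pic) {
      return compile_pic ? AddressMode::kPcRelativeBase
                         : AddressMode::kEmbeddedImmediate;
    }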
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index fc81740013..fbdbf9d086 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -87,9 +87,9 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
if (GetGraph()->GetArtMethod() != char_at_method) {
ArenaAllocator* allocator = GetGraph()->GetAllocator();
HEnvironment* environment = new (allocator) HEnvironment(allocator,
- /* number_of_vregs */ 0u,
+ /* number_of_vregs= */ 0u,
char_at_method,
- /* dex_pc */ dex::kDexNoIndex,
+ /* dex_pc= */ dex::kDexNoIndex,
check);
check->InsertRawEnvironment(environment);
}
@@ -304,4 +304,13 @@ bool PrepareForRegisterAllocation::CanMoveClinitCheck(HInstruction* input,
return true;
}
+void PrepareForRegisterAllocation::VisitTypeConversion(HTypeConversion* instruction) {
+ // For simplicity, our code generators don't handle implicit type conversion, so ensure
+ // there are none before hitting codegen.
+ if (instruction->IsImplicitConversion()) {
+ instruction->ReplaceWith(instruction->GetInput());
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
} // namespace art
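An implicit conversion is one the generated code would turn into a no-op because the input's value range is already representable in the result's register form (for example Int8 to Int32, but not Int32 to Int16). A simplified sketch of such a predicate, using an invented four-type lattice rather than the real DataType API:

    #include <cstdint>

    enum class Ty { kInt8, kInt16, kInt32, kInt64 };

    int64_t MinOf(Ty t) {
      switch (t) {
        case Ty::kInt8:  return INT8_MIN;
        case Ty::kInt16: return INT16_MIN;
        case Ty::kInt32: return INT32_MIN;
        default:         return INT64_MIN;
      }
    }

    int64_t MaxOf(Ty t) {
      switch (t) {
        case Ty::kInt8:  return INT8_MAX;
        case Ty::kInt16: return INT16_MAX;
        case Ty::kInt32: return INT32_MAX;
        default:         return INT64_MAX;
      }
    }

    // Simplified: implicit when the result type already covers the input's
    // full range in a 32-bit register, so codegen would emit nothing.
    bool IsImplicitConversion(Ty input, Ty result) {
      return input == result ||
             (result != Ty::kInt64 &&
              MinOf(input) >= MinOf(result) && MaxOf(input) <= MaxOf(result));
    }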
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a8ab256e27..e0bb76eb22 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -55,6 +55,7 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
void VisitConstructorFence(HConstructorFence* constructor_fence) override;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
void VisitDeoptimize(HDeoptimize* deoptimize) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index a9d590232c..4929e0a3a1 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -114,9 +114,9 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
void VisitCheckCast(HCheckCast* instr) override;
void VisitBoundType(HBoundType* instr) override;
void VisitNullCheck(HNullCheck* instr) override;
- void VisitPhi(HPhi* phi);
+ void VisitPhi(HPhi* phi) override;
- void VisitBasicBlock(HBasicBlock* block);
+ void VisitBasicBlock(HBasicBlock* block) override;
void ProcessWorklist();
private:
@@ -278,7 +278,7 @@ static void BoundTypeIn(HInstruction* receiver,
if (ShouldCreateBoundType(
insert_point, receiver, class_rti, start_instruction, start_block)) {
bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
- bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
+ bound_type->SetUpperBound(class_rti, /* can_be_null= */ false);
start_block->InsertInstructionBefore(bound_type, insert_point);
// To comply with the RTP algorithm, don't type the bound type just yet, it will
// be handled in RTPVisitor::VisitBoundType.
@@ -350,7 +350,7 @@ static void BoundTypeForClassCheck(HInstruction* check) {
HBasicBlock* trueBlock = compare->IsEqual()
? check->AsIf()->IfTrueSuccessor()
: check->AsIf()->IfFalseSuccessor();
- BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
+ BoundTypeIn(receiver, trueBlock, /* start_instruction= */ nullptr, class_rti);
} else {
DCHECK(check->IsDeoptimize());
if (compare->IsEqual() && check->AsDeoptimize()->GuardsAnInput()) {
@@ -427,9 +427,9 @@ void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfNotNull(HBasicBlock* bl
: ifInstruction->IfFalseSuccessor();
ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
- handle_cache_->GetObjectClassHandle(), /* is_exact */ false);
+ handle_cache_->GetObjectClassHandle(), /* is_exact= */ false);
- BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
+ BoundTypeIn(obj, notNullBlock, /* start_instruction= */ nullptr, object_rti);
}
// Returns true if one of the patterns below has been recognized. If so, the
@@ -538,10 +538,10 @@ void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfInstanceOf(HBasicBlock*
{
ScopedObjectAccess soa(Thread::Current());
if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
- class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false);
+ class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false);
}
}
- BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti);
+ BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction= */ nullptr, class_rti);
}
void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
@@ -561,7 +561,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
// Use a null loader; the target method is in a boot classpath dex file.
Handle<mirror::ClassLoader> loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
- dex_method_index, dex_cache, loader, /* referrer */ nullptr, kDirect);
+ dex_method_index, dex_cache, loader, /* referrer= */ nullptr, kDirect);
DCHECK(method != nullptr);
ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr);
@@ -571,7 +571,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
<< "Expected String.<init>: " << method->PrettyMethod();
}
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
} else if (IsAdmissible(klass)) {
ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass);
is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes();
@@ -600,12 +600,12 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction*
void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
ScopedObjectAccess soa(Thread::Current());
- SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+ SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
}
void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
ScopedObjectAccess soa(Thread::Current());
- SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+ SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
}
void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) {
@@ -614,7 +614,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue*
UpdateReferenceTypeInfo(instr,
instr->GetTypeIndex(),
instr->GetDexFile(),
- /* is_exact */ false);
+ /* is_exact= */ false);
}
}
@@ -632,7 +632,7 @@ void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstructio
klass = info.GetField()->LookupResolvedType();
}
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
}
void ReferenceTypePropagation::RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
@@ -665,7 +665,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) {
instr->SetValidLoadedClassRTI();
}
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitInstanceOf(HInstanceOf* instr) {
@@ -682,31 +682,31 @@ void ReferenceTypePropagation::RTPVisitor::VisitClinitCheck(HClinitCheck* instr)
void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodHandle(HLoadMethodHandle* instr) {
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
handle_cache_->GetMethodHandleClassHandle(),
- /* is_exact */ true));
+ /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodType(HLoadMethodType* instr) {
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitLoadString(HLoadString* instr) {
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitLoadException(HLoadException* instr) {
DCHECK(instr->GetBlock()->IsCatchBlock());
TryCatchInformation* catch_info = instr->GetBlock()->GetTryCatchInformation();
- if (catch_info->IsCatchAllTypeIndex()) {
- instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact */ false));
- } else {
+ if (catch_info->IsValidTypeIndex()) {
UpdateReferenceTypeInfo(instr,
catch_info->GetCatchTypeIndex(),
catch_info->GetCatchDexFile(),
- /* is_exact */ false);
+ /* is_exact= */ false);
+ } else {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact= */ false));
}
}
@@ -736,7 +736,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitBoundType(HBoundType* instr) {
// bound type is dead. To avoid confusing other potential optimizations, we mark
// the bound as non-exact.
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
+ ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false));
}
} else {
// Object not typed yet. Leave BoundType untyped for now rather than
@@ -914,7 +914,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitInvoke(HInvoke* instr) {
ScopedObjectAccess soa(Thread::Current());
ArtMethod* method = instr->GetResolvedMethod();
ObjPtr<mirror::Class> klass = (method == nullptr) ? nullptr : method->LookupResolvedReturnType();
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
}
void ReferenceTypePropagation::RTPVisitor::VisitArrayGet(HArrayGet* instr) {
@@ -947,7 +947,7 @@ void ReferenceTypePropagation::RTPVisitor::UpdateBoundType(HBoundType* instr) {
// bound type is dead. To avoid confusing other potential optimizations, we mark
// the bound as non-exact.
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact */ false));
+ ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact= */ false));
}
}
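The reordered VisitLoadException branch types the caught exception from the handler's catch type when one is present, and otherwise bounds it by a non-exact Throwable for catch-all handlers. The same decision in isolation, with invented types:

    #include <string>

    struct CatchInfo {
      bool has_type_index;      // false for a catch-all handler
      std::string caught_type;  // e.g. "java.io.IOException"
    };

    // Illustrative: choose the reference type assigned to a loaded exception.
    std::string TypeForLoadException(const CatchInfo& info) {
      // A typed handler names the class; a catch-all can only be
      // bounded by java.lang.Throwable.
      return info.has_type_index ? info.caught_type : "java.lang.Throwable";
    }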
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 27f9ac3990..b1f0a1add9 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -280,16 +280,16 @@ size_t RegisterAllocationResolver::CalculateMaximumSafepointSpillSize(
LocationSummary* locations = instruction->GetLocations();
if (locations->OnlyCallsOnSlowPath()) {
size_t core_spills =
- codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
+ codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true);
size_t fp_spills =
- codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
+ codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false);
size_t spill_size =
core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
} else if (locations->CallsOnMainAndSlowPath()) {
// Nothing to spill on the slow path if the main path already clobbers caller-saves.
- DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
- DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
+ DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true));
+ DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false));
}
}
return maximum_safepoint_spill_size;
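The loop above reserves a single stack area sized for the worst-case safepoint: per safepoint, the core and FP spill counts are weighted by their register sizes and the running maximum is kept. A standalone sketch of that accumulation (hypothetical sizes, not ART's):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Safepoint {
      size_t core_spills;  // caller-save core registers live across it
      size_t fp_spills;    // caller-save FP registers live across it
    };

    size_t MaxSafepointSpillSize(const std::vector<Safepoint>& safepoints,
                                 size_t core_size,  // e.g. 8 bytes
                                 size_t fp_size) {  // e.g. 16 bytes
      size_t max_size = 0u;
      for (const Safepoint& sp : safepoints) {
        max_size = std::max(max_size,
                            core_size * sp.core_spills + fp_size * sp.fp_spills);
      }
      return max_size;
    }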
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 1e00003701..0d6c5a3eff 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -252,7 +252,7 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction)
temp_intervals_.push_back(interval);
interval->AddTempUse(instruction, i);
if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) {
- interval->AddHighInterval(/* is_temp */ true);
+ interval->AddHighInterval(/* is_temp= */ true);
LiveInterval* high = interval->GetHighInterval();
temp_intervals_.push_back(high);
unhandled_fp_intervals_.push_back(high);
@@ -284,7 +284,7 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction)
}
if (locations->WillCall()) {
- BlockRegisters(position, position + 1, /* caller_save_only */ true);
+ BlockRegisters(position, position + 1, /* caller_save_only= */ true);
}
for (size_t i = 0; i < locations->GetInputCount(); ++i) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index db6a760007..79eb082cd7 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -68,11 +68,11 @@ class RegisterAllocatorTest : public OptimizingUnitTest {
bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals,
const CodeGenerator& codegen) {
return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
- /* number_of_spill_slots */ 0u,
- /* number_of_out_slots */ 0u,
+ /* number_of_spill_slots= */ 0u,
+ /* number_of_out_slots= */ 0u,
codegen,
- /* processing_core_registers */ true,
- /* log_fatal_on_failure */ false);
+ /* processing_core_registers= */ true,
+ /* log_fatal_on_failure= */ false);
}
};
@@ -872,9 +872,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
// Create an interval with lifetime holes.
static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}};
LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), -1, one);
- first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
- first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 7));
- first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 6));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 7));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 6));
locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
@@ -895,9 +895,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
// before lifetime position 6 yet.
static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}};
LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetScopedAllocator(), -1, three);
- third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
- third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 4));
- third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 3));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 4));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 3));
locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
third = third->SplitAt(3);
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index df897a4904..fdef45ec8b 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -680,7 +680,7 @@ static void MoveAfterInBlock(HInstruction* instruction, HInstruction* cursor) {
DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
DCHECK(!instruction->IsControlFlow());
DCHECK(!cursor->IsControlFlow());
- instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
+ instruction->MoveBefore(cursor->GetNext(), /* do_checks= */ false);
}
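MoveAfterInBlock expresses "move after the cursor" as "move before the cursor's successor", which is why the DCHECK above rules out a cursor that is the block's last instruction. The same idiom on a plain doubly linked list (illustrative, not the HIR API):

    #include <iterator>
    #include <list>

    // Moving 'elem' to the slot right after 'cursor' is splicing it in
    // front of the node that follows 'cursor'.
    void MoveAfter(std::list<int>& l,
                   std::list<int>::iterator elem,
                   std::list<int>::iterator cursor) {
      l.splice(std::next(cursor), l, elem);
    }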
void HScheduler::Schedule(HInstruction* instruction) {
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index d89d1171a1..858a555e97 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -563,7 +563,7 @@ void SchedulingLatencyVisitorARM::HandleGenerateDataProc(HDataProcWithShifterOp*
last_visited_internal_latency_ = kArmIntegerOpLatency;
last_visited_latency_ = kArmIntegerOpLatency;
} else {
- HandleGenerateDataProcInstruction(/* internal_latency */ true);
+ HandleGenerateDataProcInstruction(/* internal_latency= */ true);
HandleGenerateDataProcInstruction();
}
}
@@ -585,8 +585,8 @@ void SchedulingLatencyVisitorARM::HandleGenerateLongDataProc(HDataProcWithShifte
DCHECK_LT(shift_value, 32U);
if (kind == HInstruction::kOr || kind == HInstruction::kXor) {
- HandleGenerateDataProcInstruction(/* internal_latency */ true);
- HandleGenerateDataProcInstruction(/* internal_latency */ true);
+ HandleGenerateDataProcInstruction(/* internal_latency= */ true);
+ HandleGenerateDataProcInstruction(/* internal_latency= */ true);
HandleGenerateDataProcInstruction();
} else {
last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 981fcc42a7..e0e265a04c 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -148,7 +148,7 @@ class SchedulerTest : public OptimizingUnitTest {
SchedulingGraph scheduling_graph(scheduler,
GetScopedAllocator(),
- /* heap_location_collector */ nullptr);
+ /* heap_location_collector= */ nullptr);
// Instructions must be inserted in reverse order into the scheduling graph.
for (HInstruction* instr : ReverseRange(block_instructions)) {
scheduling_graph.AddNode(instr);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 5c2f57e314..885a08d459 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -35,22 +35,6 @@
namespace art {
-bool HSharpening::Run() {
- // We don't care about the order of the blocks here.
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (instruction->IsInvokeStaticOrDirect()) {
- SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(), codegen_);
- }
- // TODO: Move the sharpening of invoke-virtual/-interface/-super from HGraphBuilder
- // here. Rewrite it to avoid the CompilerDriver's reliance on verifier data
- // because we know the type better when inlining.
- }
- }
- return true;
-}
-
static bool IsInBootImage(ArtMethod* method) {
const std::vector<gc::space::ImageSpace*>& image_spaces =
Runtime::Current()->GetHeap()->GetBootImageSpaces();
@@ -72,17 +56,14 @@ static bool BootImageAOTCanEmbedMethod(ArtMethod* method, const CompilerOptions&
return compiler_options.IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
}
-void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
- CodeGenerator* codegen) {
- if (invoke->IsStringInit()) {
- // Not using the dex cache arrays. But we could still try to use a better dispatch...
- // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
- return;
+HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenInvokeStaticOrDirect(
+ ArtMethod* callee, CodeGenerator* codegen) {
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current()); // Required for GetDeclaringClass below.
+ DCHECK(callee != nullptr);
+ DCHECK(!(callee->IsConstructor() && callee->GetDeclaringClass()->IsStringClass()));
}
- ArtMethod* callee = invoke->GetResolvedMethod();
- DCHECK(callee != nullptr);
-
HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
uint64_t method_load_data = 0u;
@@ -141,9 +122,7 @@ void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
HInvokeStaticOrDirect::DispatchInfo desired_dispatch_info = {
method_load_kind, code_ptr_location, method_load_data
};
- HInvokeStaticOrDirect::DispatchInfo dispatch_info =
- codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, invoke);
- invoke->SetDispatchInfo(dispatch_info);
+ return codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, callee);
}
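With this refactoring, SharpenInvokeStaticOrDirect is a pure function from a resolved callee to a DispatchInfo, and each caller attaches the result itself. Judging from the removed lines above, a call site in the builder or simplifier would look roughly like this fragment (invoke and codegen assumed in scope):

    // Sketch of the new call-site shape; not a verbatim quote of the patch.
    invoke->SetDispatchInfo(HSharpening::SharpenInvokeStaticOrDirect(
        invoke->GetResolvedMethod(), codegen));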
HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
@@ -254,7 +233,7 @@ static inline bool CanUseTypeCheckBitstring(ObjPtr<mirror::Class> klass, CodeGen
// Try to assign a type check bitstring.
MutexLock subtype_check_lock(Thread::Current(), *Locks::subtype_check_lock_);
- if ((false) && // FIXME: Inliner does not respect CompilerDriver::IsClassToCompile()
+ if ((false) && // FIXME: Inliner does not respect CompilerDriver::ShouldCompileMethod()
// and we're hitting an unassigned bitstring in dex2oat_image_test. b/26687569
kIsDebugBuild &&
codegen->GetCompilerOptions().IsBootImage() &&
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index dc55eea683..b81867201f 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -25,24 +25,13 @@ namespace art {
class CodeGenerator;
class DexCompilationUnit;
-// Optimization that tries to improve the way we dispatch methods and access types,
-// fields, etc. Besides actual method sharpening based on receiver type (for example
-// virtual->direct), this includes selecting the best available dispatch for
-// invoke-static/-direct based on code generator support.
-class HSharpening : public HOptimization {
+// Utility methods that try to improve the way we dispatch methods, and access
+// types and strings.
+class HSharpening {
public:
- HSharpening(HGraph* graph,
- CodeGenerator* codegen,
- const char* name = kSharpeningPassName)
- : HOptimization(graph, name),
- codegen_(codegen) { }
-
- bool Run() override;
-
- static constexpr const char* kSharpeningPassName = "sharpening";
-
- // Used by Sharpening and InstructionSimplifier.
- static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, CodeGenerator* codegen);
+ // Used by the builder and InstructionSimplifier.
+ static HInvokeStaticOrDirect::DispatchInfo SharpenInvokeStaticOrDirect(
+ ArtMethod* callee, CodeGenerator* codegen);
// Used by the builder and the inliner.
static HLoadClass::LoadKind ComputeLoadClassKind(HLoadClass* load_class,
@@ -61,9 +50,6 @@ class HSharpening : public HOptimization {
CodeGenerator* codegen,
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles);
-
- private:
- CodeGenerator* codegen_;
};
} // namespace art
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index 4b0be07f3b..cf26e79c69 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -141,13 +141,13 @@ TEST(SideEffectsTest, NoDependences) {
TEST(SideEffectsTest, VolatileDependences) {
SideEffects volatile_write =
- SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ true);
+ SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ true);
SideEffects any_write =
- SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false);
+ SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false);
SideEffects volatile_read =
- SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ true);
+ SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ true);
SideEffects any_read =
- SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ false);
+ SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ false);
EXPECT_FALSE(volatile_write.MayDependOn(any_read));
EXPECT_TRUE(any_read.MayDependOn(volatile_write));
@@ -163,15 +163,15 @@ TEST(SideEffectsTest, VolatileDependences) {
TEST(SideEffectsTest, SameWidthTypesNoAlias) {
// Type I/F.
testNoWriteAndReadDependence(
- SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false),
- SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile */ false));
+ SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false),
+ SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile= */ false));
testNoWriteAndReadDependence(
SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
// Type L/D.
testNoWriteAndReadDependence(
- SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false),
- SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile */ false));
+ SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false),
+ SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile= */ false));
testNoWriteAndReadDependence(
SideEffects::ArrayWriteOfType(DataType::Type::kInt64),
SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
@@ -181,9 +181,9 @@ TEST(SideEffectsTest, AllWritesAndReads) {
SideEffects s = SideEffects::None();
// Keep taking the union of different writes and reads.
for (DataType::Type type : kTestTypes) {
- s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayWriteOfType(type));
- s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayReadOfType(type));
}
EXPECT_TRUE(s.DoesAllReadWrite());
@@ -254,10 +254,10 @@ TEST(SideEffectsTest, BitStrings) {
"||I|||||",
SideEffects::ArrayReadOfType(DataType::Type::kInt32).ToString().c_str());
SideEffects s = SideEffects::None();
- s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile */ false));
- s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile= */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayWriteOfType(DataType::Type::kInt16));
- s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
EXPECT_STREQ("||DF|I||S|JC|", s.ToString().c_str());
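The dependence rules these tests exercise reduce to bit-set overlap: one operation may depend on another if it reads any location the other writes. A toy model with one read and one write bit per location kind, far simpler than ART's packed encoding:

    #include <cstdint>

    struct Effects {
      uint64_t writes = 0;
      uint64_t reads = 0;

      Effects Union(Effects o) const {
        return Effects{writes | o.writes, reads | o.reads};
      }

      // 'this' may depend on 'other' if it reads anything 'other' writes.
      bool MayDependOn(Effects other) const {
        return (reads & other.writes) != 0;
      }
    };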
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index dda29a1b4b..0d0e1ecf1f 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -16,6 +16,9 @@
#include "ssa_builder.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "base/logging.h"
#include "data_type-inl.h"
#include "dex/bytecode_utils.h"
#include "mirror/class-inl.h"
@@ -388,7 +391,7 @@ bool SsaBuilder::FixAmbiguousArrayOps() {
// succeed in code validated by the verifier.
HInstruction* equivalent = GetFloatOrDoubleEquivalent(value, array_type);
DCHECK(equivalent != nullptr);
- aset->ReplaceInput(equivalent, /* input_index */ 2);
+ aset->ReplaceInput(equivalent, /* index= */ 2);
if (equivalent->IsPhi()) {
// Returned equivalent is a phi which may not have had its inputs
// replaced yet. We need to run primitive type propagation on it.
@@ -415,85 +418,36 @@ bool SsaBuilder::FixAmbiguousArrayOps() {
return true;
}
-static bool HasAliasInEnvironments(HInstruction* instruction) {
- HEnvironment* last_user = nullptr;
+bool SsaBuilder::HasAliasInEnvironments(HInstruction* instruction) {
+ ScopedArenaHashSet<size_t> seen_users(
+ local_allocator_->Adapter(kArenaAllocGraphBuilder));
for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
DCHECK(use.GetUser() != nullptr);
- // Note: The first comparison (== null) always fails.
- if (use.GetUser() == last_user) {
+ size_t id = use.GetUser()->GetHolder()->GetId();
+ if (seen_users.find(id) != seen_users.end()) {
return true;
}
- last_user = use.GetUser();
- }
-
- if (kIsDebugBuild) {
- // Do a quadratic search to ensure same environment uses are next
- // to each other.
- const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
- for (auto current = env_uses.begin(), end = env_uses.end(); current != end; ++current) {
- auto next = current;
- for (++next; next != end; ++next) {
- DCHECK(next->GetUser() != current->GetUser());
- }
- }
+ seen_users.insert(id);
}
return false;
}
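The rewritten HasAliasInEnvironments no longer assumes duplicate environment uses sit next to each other (the assumption the deleted debug-build check used to verify); it records each environment holder's id in a hash set and reports an alias on the first repeat. The pattern in isolation, over plain ids:

    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    // Returns true if any id occurs more than once in 'user_ids'.
    bool HasDuplicate(const std::vector<size_t>& user_ids) {
      std::unordered_set<size_t> seen;
      for (size_t id : user_ids) {
        if (!seen.insert(id).second) {
          return true;  // insert() reports 'already present'
        }
      }
      return false;
    }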
-void SsaBuilder::ReplaceUninitializedStringPhis() {
- ScopedArenaHashSet<HInstruction*> seen_instructions(
- local_allocator_->Adapter(kArenaAllocGraphBuilder));
- ScopedArenaVector<HInstruction*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
-
- // Iterate over all inputs and uses of the phi, recursively, until all related instructions
- // have been visited.
- for (const auto& pair : uninitialized_string_phis_) {
- HPhi* string_phi = pair.first;
- HInvoke* invoke = pair.second;
- worklist.push_back(string_phi);
- HNewInstance* found_instance = nullptr;
- do {
- HInstruction* current = worklist.back();
- worklist.pop_back();
- if (seen_instructions.find(current) != seen_instructions.end()) {
- continue;
- }
- seen_instructions.insert(current);
- if (current->IsNewInstance()) {
- // If it is the first time we see the allocation, replace its uses. We don't register
- // it through `RemoveRedundantUninitializedStrings`, as that method makes assumption about
- // aliasing and environment uses that don't hold when the string escapes to phis.
- // Note that this also means we will keep the (useless) allocation.
- if (found_instance == nullptr) {
- found_instance = current->AsNewInstance();
- } else {
- DCHECK(found_instance == current);
- }
- } else if (current->IsPhi()) {
- // Push all inputs to the worklist. Those should be Phis or NewInstance.
- for (HInstruction* input : current->GetInputs()) {
- DCHECK(input->IsPhi() || input->IsNewInstance()) << input->DebugName();
- worklist.push_back(input);
- }
- } else {
- // The verifier prevents any other DEX uses of the uninitialized string.
- DCHECK(current->IsEqual() || current->IsNotEqual());
- continue;
- }
- current->ReplaceUsesDominatedBy(invoke, invoke);
- current->ReplaceEnvUsesDominatedBy(invoke, invoke);
- // Push all users to the worklist. Now that we have replaced
- // the uses dominated by the invokes, the remaining users should only
- // be Phi, or Equal/NotEqual.
- for (const HUseListNode<HInstruction*>& use : current->GetUses()) {
- HInstruction* user = use.GetUser();
- DCHECK(user->IsPhi() || user->IsEqual() || user->IsNotEqual()) << user->DebugName();
- worklist.push_back(user);
- }
- } while (!worklist.empty());
- seen_instructions.clear();
- DCHECK(found_instance != nullptr);
+bool SsaBuilder::ReplaceUninitializedStringPhis() {
+ for (HInvoke* invoke : uninitialized_string_phis_) {
+ HInstruction* str = invoke->InputAt(invoke->InputCount() - 1);
+ if (str->IsPhi()) {
+ // If, after redundant and dead phi elimination, a phi still feeds the invoke,
+ // we must be compiling a method with irreducible loops. Just bail.
+ DCHECK(graph_->HasIrreducibleLoops());
+ return false;
+ }
+ DCHECK(str->IsNewInstance());
+ AddUninitializedString(str->AsNewInstance());
+ str->ReplaceUsesDominatedBy(invoke, invoke);
+ str->ReplaceEnvUsesDominatedBy(invoke, invoke);
+ invoke->RemoveInputAt(invoke->InputCount() - 1);
}
+ return true;
}
void SsaBuilder::RemoveRedundantUninitializedStrings() {
@@ -508,8 +462,9 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() {
DCHECK(new_instance->IsStringAlloc());
// Replace NewInstance of String with NullConstant if not used prior to
- // calling StringFactory. In case of deoptimization, the interpreter is
- // expected to skip null check on the `this` argument of the StringFactory call.
+ // calling StringFactory. We check for aliases in the environments in case of
+ // deoptimization. The interpreter is expected to skip the null check on the
+ // `this` argument of the StringFactory call.
if (!new_instance->HasNonEnvironmentUses() && !HasAliasInEnvironments(new_instance)) {
new_instance->ReplaceWith(graph_->GetNullConstant());
new_instance->GetBlock()->RemoveInstruction(new_instance);
@@ -544,11 +499,6 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() {
GraphAnalysisResult SsaBuilder::BuildSsa() {
DCHECK(!graph_->IsInSsaForm());
- // Replace Phis that feed in a String.<init>, as well as their aliases, with
- // the actual String allocation invocation. We do this first, as the phis stored in
- // the data structure might get removed from the graph in later stages during `BuildSsa`.
- ReplaceUninitializedStringPhis();
-
// Propagate types of phis. At this point, phis are typed void in the general
// case, or float/double/reference if we created an equivalent phi. So we need
// to propagate the types across phis to give them a correct type. If a type
@@ -575,7 +525,7 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
class_loader_,
dex_cache_,
handles_,
- /* is_first_run */ true).Run();
+ /* is_first_run= */ true).Run();
// HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
// (int/float or long/double) and marked ArraySets with ambiguous input type.
@@ -607,6 +557,14 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
// input types.
dead_phi_elimimation.EliminateDeadPhis();
+ // Replace Phis that feed in a String.<init> during instruction building. We
+ // run this after redundant and dead phi elimination to make sure the phi will
+ // have been replaced by the actual allocation. Only with an irreducible loop
+ // can a phi still be the input, in which case we bail.
+ if (!ReplaceUninitializedStringPhis()) {
+ return kAnalysisFailIrreducibleLoopAndStringInit;
+ }
+
// HInstructionBuilder replaced uses of NewInstances of String with the
// results of their corresponding StringFactory calls. Unless the String
// objects are used before they are initialized, they can be replaced with
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 765544508e..bb892c9304 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -97,8 +97,8 @@ class SsaBuilder : public ValueObject {
}
}
- void AddUninitializedStringPhi(HPhi* phi, HInvoke* invoke) {
- uninitialized_string_phis_.push_back(std::make_pair(phi, invoke));
+ void AddUninitializedStringPhi(HInvoke* invoke) {
+ uninitialized_string_phis_.push_back(invoke);
}
private:
@@ -123,7 +123,8 @@ class SsaBuilder : public ValueObject {
HArrayGet* GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget);
void RemoveRedundantUninitializedStrings();
- void ReplaceUninitializedStringPhis();
+ bool ReplaceUninitializedStringPhis();
+ bool HasAliasInEnvironments(HInstruction* instruction);
HGraph* const graph_;
Handle<mirror::ClassLoader> class_loader_;
@@ -137,7 +138,7 @@ class SsaBuilder : public ValueObject {
ScopedArenaVector<HArrayGet*> ambiguous_agets_;
ScopedArenaVector<HArraySet*> ambiguous_asets_;
ScopedArenaVector<HNewInstance*> uninitialized_strings_;
- ScopedArenaVector<std::pair<HPhi*, HInvoke*>> uninitialized_string_phis_;
+ ScopedArenaVector<HInvoke*> uninitialized_string_phis_;
DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
};
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 62a70d6b12..7b2c3a939c 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -120,7 +120,7 @@ void SsaLivenessAnalysis::RecursivelyProcessInputs(HInstruction* current,
DCHECK(input->HasSsaIndex());
// `input` generates a result used by `current`. Add use and update
// the live-in set.
- input->GetLiveInterval()->AddUse(current, /* environment */ nullptr, i, actual_user);
+ input->GetLiveInterval()->AddUse(current, /* environment= */ nullptr, i, actual_user);
live_in->SetBit(input->GetSsaIndex());
} else if (has_out_location) {
// `input` generates a result but it is not used by `current`.
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 92d0b08301..c88390775c 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1155,10 +1155,11 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
*
* (a) Non-environment uses of an instruction always make
* the instruction live.
- * (b) Environment uses of an instruction whose type is
- * object (that is, non-primitive), make the instruction live.
- * This is due to having to keep alive objects that have
- * finalizers deleting native objects.
+ * (b) Environment uses of an instruction whose type is object (that is, non-primitive) make the
+ * instruction live, unless the class has an @DeadReferenceSafe annotation.
+ * This avoids unexpected premature reference enqueuing or finalization, which could
+ * result in premature deletion of native objects. In the presence of @DeadReferenceSafe,
+ * object references are treated like primitive types.
* (c) When the graph has the debuggable property, environment uses
* of an instruction that has a primitive type make the instruction live.
* If the graph does not have the debuggable property, the environment
@@ -1287,6 +1288,7 @@ class SsaLivenessAnalysis : public ValueObject {
// When compiling in OSR mode, all loops in the compiled method may be entered
// from the interpreter via SuspendCheck; thus we need to preserve the environment.
if (env_holder->IsSuspendCheck() && graph->IsCompilingOsr()) return true;
+ if (graph->IsDeadReferenceSafe()) return false;
return instruction->GetType() == DataType::Type::kReference;
}
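Condensed, the environment-liveness rule after this hunk is: OSR suspend checks keep every environment value alive; otherwise a @DeadReferenceSafe graph keeps nothing alive solely through environments, and the default case keeps only object references. Restated as a standalone predicate (simplified; the debuggable case from the comment above is omitted):

    // Simplified restatement of the decision order in the hunk above.
    bool KeptAliveByEnvironment(bool is_osr_suspend_check,
                                bool dead_reference_safe,
                                bool is_reference_type) {
      if (is_osr_suspend_check) return true;   // preserve OSR entry state
      if (dead_reference_safe) return false;   // references act like primitives
      return is_reference_type;                // keep objects for finalizers
    }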
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 4b525531da..352c44f63a 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -94,25 +94,25 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
null_check);
null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
- HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u);
+ HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc= */ 0u);
block->AddInstruction(bounds_check);
HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
bounds_check);
bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
bounds_check->SetRawEnvironment(bounds_check_env);
HInstruction* array_set =
- new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
@@ -163,9 +163,9 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
null_check);
null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
@@ -175,17 +175,17 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length);
block->AddInstruction(ae);
HInstruction* deoptimize = new(GetAllocator()) HDeoptimize(
- GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
+ GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc= */ 0u);
block->AddInstruction(deoptimize);
HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
deoptimize);
deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
deoptimize->SetRawEnvironment(deoptimize_env);
HInstruction* array_set =
- new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 5370f43b4f..3fcb72e4fb 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -141,7 +141,7 @@ bool SsaRedundantPhiElimination::Run() {
ArenaBitVector visited_phis_in_cycle(&allocator,
graph_->GetCurrentInstructionId(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocSsaPhiElimination);
visited_phis_in_cycle.ClearAllBits();
ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
diff --git a/compiler/optimizing/superblock_cloner.h b/compiler/optimizing/superblock_cloner.h
index f21172131b..dbe9008e92 100644
--- a/compiler/optimizing/superblock_cloner.h
+++ b/compiler/optimizing/superblock_cloner.h
@@ -372,8 +372,8 @@ class PeelUnrollHelper : public ValueObject {
// Returns whether the loop can be peeled/unrolled.
bool IsLoopClonable() const { return cloner_.IsSubgraphClonable(); }
- HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll */ false); }
- HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll */ true); }
+ HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll= */ false); }
+ HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll= */ true); }
HLoopInformation* GetRegionToBeAdjusted() const { return cloner_.GetRegionToBeAdjusted(); }
protected: