Diffstat (limited to 'compiler')
-rw-r--r-- compiler/Android.bp | 64
-rw-r--r-- compiler/cfi_test.h | 8
-rw-r--r-- compiler/common_compiler_test.cc | 62
-rw-r--r-- compiler/common_compiler_test.h | 8
-rw-r--r-- compiler/compiled_method.cc | 36
-rw-r--r-- compiler/compiled_method.h | 17
-rw-r--r-- compiler/compiler.cc | 3
-rw-r--r-- compiler/compiler.h | 11
-rw-r--r-- compiler/debug/dwarf/dwarf_test.cc | 2
-rw-r--r-- compiler/debug/dwarf/headers.h | 28
-rw-r--r-- compiler/debug/elf_debug_info_writer.h | 37
-rw-r--r-- compiler/debug/elf_debug_line_writer.h | 14
-rw-r--r-- compiler/debug/elf_debug_reader.h | 104
-rw-r--r-- compiler/debug/elf_debug_writer.cc | 244
-rw-r--r-- compiler/debug/elf_debug_writer.h | 9
-rw-r--r-- compiler/debug/method_debug_info.h | 2
-rw-r--r-- compiler/debug/xz_utils.cc | 63
-rw-r--r-- compiler/debug/xz_utils.h | 3
-rw-r--r-- compiler/dex/dex_to_dex_compiler.cc | 21
-rw-r--r-- compiler/dex/dex_to_dex_compiler.h | 13
-rw-r--r-- compiler/dex/dex_to_dex_decompiler_test.cc | 11
-rw-r--r-- compiler/dex/inline_method_analyser.cc | 10
-rw-r--r-- compiler/dex/quick_compiler_callbacks.h | 5
-rw-r--r-- compiler/dex/verification_results.cc | 12
-rw-r--r-- compiler/dex/verification_results.h | 12
-rw-r--r-- compiler/dex/verified_method.cc | 6
-rw-r--r-- compiler/driver/compiled_method_storage_test.cc | 20
-rw-r--r-- compiler/driver/compiler_driver-inl.h | 26
-rw-r--r-- compiler/driver/compiler_driver.cc | 496
-rw-r--r-- compiler/driver/compiler_driver.h | 155
-rw-r--r-- compiler/driver/compiler_driver_test.cc | 20
-rw-r--r-- compiler/driver/compiler_options.cc | 52
-rw-r--r-- compiler/driver/compiler_options.h | 92
-rw-r--r-- compiler/driver/compiler_options_map-inl.h | 34
-rw-r--r-- compiler/driver/compiler_options_map.def | 6
-rw-r--r-- compiler/driver/compiler_options_map.h | 2
-rw-r--r-- compiler/driver/dex_compilation_unit.cc | 40
-rw-r--r-- compiler/driver/dex_compilation_unit.h | 56
-rw-r--r-- compiler/driver/simple_compiler_options_map.h | 2
-rw-r--r-- compiler/exception_test.cc | 10
-rw-r--r-- compiler/jit/jit_compiler.cc | 150
-rw-r--r-- compiler/jit/jit_compiler.h | 10
-rw-r--r-- compiler/jni/jni_cfi_test.cc | 2
-rw-r--r-- compiler/jni/jni_compiler_test.cc | 16
-rw-r--r-- compiler/jni/quick/arm/calling_convention_arm.cc | 3
-rw-r--r-- compiler/jni/quick/arm64/calling_convention_arm64.cc | 3
-rw-r--r-- compiler/jni/quick/calling_convention.cc | 2
-rw-r--r-- compiler/jni/quick/calling_convention.h | 2
-rw-r--r-- compiler/jni/quick/jni_compiler.cc | 16
-rw-r--r-- compiler/jni/quick/mips/calling_convention_mips.cc | 3
-rw-r--r-- compiler/jni/quick/mips/calling_convention_mips.h | 2
-rw-r--r-- compiler/jni/quick/mips64/calling_convention_mips64.cc | 3
-rw-r--r-- compiler/jni/quick/x86/calling_convention_x86.cc | 3
-rw-r--r-- compiler/jni/quick/x86_64/calling_convention_x86_64.cc | 1
-rw-r--r-- compiler/linker/elf_builder.h | 63
-rw-r--r-- compiler/linker/linker_patch.h | 8
-rw-r--r-- compiler/optimizing/block_builder.cc | 30
-rw-r--r-- compiler/optimizing/bounds_check_elimination.cc | 8
-rw-r--r-- compiler/optimizing/bounds_check_elimination_test.cc | 15
-rw-r--r-- compiler/optimizing/builder.cc | 12
-rw-r--r-- compiler/optimizing/builder.h | 3
-rw-r--r-- compiler/optimizing/code_generator.cc | 68
-rw-r--r-- compiler/optimizing/code_generator.h | 11
-rw-r--r-- compiler/optimizing/code_generator_arm64.cc | 164
-rw-r--r-- compiler/optimizing/code_generator_arm64.h | 6
-rw-r--r-- compiler/optimizing/code_generator_arm_vixl.cc | 247
-rw-r--r-- compiler/optimizing/code_generator_arm_vixl.h | 2
-rw-r--r-- compiler/optimizing/code_generator_mips.cc | 130
-rw-r--r-- compiler/optimizing/code_generator_mips.h | 2
-rw-r--r-- compiler/optimizing/code_generator_mips64.cc | 104
-rw-r--r-- compiler/optimizing/code_generator_mips64.h | 2
-rw-r--r-- compiler/optimizing/code_generator_vector_arm64.cc | 72
-rw-r--r-- compiler/optimizing/code_generator_vector_arm_vixl.cc | 10
-rw-r--r-- compiler/optimizing/code_generator_vector_mips.cc | 22
-rw-r--r-- compiler/optimizing/code_generator_vector_mips64.cc | 20
-rw-r--r-- compiler/optimizing/code_generator_vector_x86.cc | 41
-rw-r--r-- compiler/optimizing/code_generator_vector_x86_64.cc | 41
-rw-r--r-- compiler/optimizing/code_generator_x86.cc | 140
-rw-r--r-- compiler/optimizing/code_generator_x86.h | 5
-rw-r--r-- compiler/optimizing/code_generator_x86_64.cc | 206
-rw-r--r-- compiler/optimizing/code_generator_x86_64.h | 5
-rw-r--r-- compiler/optimizing/code_sinking.cc | 12
-rw-r--r-- compiler/optimizing/codegen_test.cc | 27
-rw-r--r-- compiler/optimizing/constant_folding_test.cc | 2
-rw-r--r-- compiler/optimizing/constructor_fence_redundancy_elimination.cc | 2
-rw-r--r-- compiler/optimizing/data_type.h | 15
-rw-r--r-- compiler/optimizing/dead_code_elimination_test.cc | 2
-rw-r--r-- compiler/optimizing/graph_checker.cc | 6
-rw-r--r-- compiler/optimizing/graph_visualizer.cc | 21
-rw-r--r-- compiler/optimizing/gvn.cc | 6
-rw-r--r-- compiler/optimizing/induction_var_analysis.cc | 4
-rw-r--r-- compiler/optimizing/induction_var_range.cc | 34
-rw-r--r-- compiler/optimizing/induction_var_range_test.cc | 14
-rw-r--r-- compiler/optimizing/inliner.cc | 148
-rw-r--r-- compiler/optimizing/inliner.h | 5
-rw-r--r-- compiler/optimizing/instruction_builder.cc | 310
-rw-r--r-- compiler/optimizing/instruction_builder.h | 30
-rw-r--r-- compiler/optimizing/instruction_simplifier.cc | 35
-rw-r--r-- compiler/optimizing/instruction_simplifier_arm.cc | 9
-rw-r--r-- compiler/optimizing/instruction_simplifier_arm64.cc | 4
-rw-r--r-- compiler/optimizing/instruction_simplifier_shared.cc | 4
-rw-r--r-- compiler/optimizing/instruction_simplifier_x86.cc | 88
-rw-r--r-- compiler/optimizing/instruction_simplifier_x86.h | 44
-rw-r--r-- compiler/optimizing/instruction_simplifier_x86_64.cc | 82
-rw-r--r-- compiler/optimizing/instruction_simplifier_x86_64.h | 48
-rw-r--r-- compiler/optimizing/instruction_simplifier_x86_shared.cc | 137
-rw-r--r-- compiler/optimizing/instruction_simplifier_x86_shared.h | 29
-rw-r--r-- compiler/optimizing/intrinsic_objects.cc | 3
-rw-r--r-- compiler/optimizing/intrinsics.cc | 179
-rw-r--r-- compiler/optimizing/intrinsics.h | 33
-rw-r--r-- compiler/optimizing/intrinsics_arm64.cc | 353
-rw-r--r-- compiler/optimizing/intrinsics_arm_vixl.cc | 196
-rw-r--r-- compiler/optimizing/intrinsics_mips.cc | 94
-rw-r--r-- compiler/optimizing/intrinsics_mips64.cc | 93
-rw-r--r-- compiler/optimizing/intrinsics_x86.cc | 242
-rw-r--r-- compiler/optimizing/intrinsics_x86_64.cc | 208
-rw-r--r-- compiler/optimizing/load_store_elimination.cc | 2
-rw-r--r-- compiler/optimizing/loop_optimization.cc | 134
-rw-r--r-- compiler/optimizing/loop_optimization.h | 6
-rw-r--r-- compiler/optimizing/loop_optimization_test.cc | 2
-rw-r--r-- compiler/optimizing/nodes.cc | 98
-rw-r--r-- compiler/optimizing/nodes.h | 103
-rw-r--r-- compiler/optimizing/nodes_vector.h | 82
-rw-r--r-- compiler/optimizing/nodes_vector_test.cc | 6
-rw-r--r-- compiler/optimizing/nodes_x86.h | 86
-rw-r--r-- compiler/optimizing/optimization.cc | 38
-rw-r--r-- compiler/optimizing/optimization.h | 7
-rw-r--r-- compiler/optimizing/optimizing_cfi_test.cc | 2
-rw-r--r-- compiler/optimizing/optimizing_compiler.cc | 295
-rw-r--r-- compiler/optimizing/optimizing_compiler.h | 7
-rw-r--r-- compiler/optimizing/optimizing_compiler_stats.h | 4
-rw-r--r-- compiler/optimizing/optimizing_unit_test.h | 12
-rw-r--r-- compiler/optimizing/parallel_move_test.cc | 4
-rw-r--r-- compiler/optimizing/pc_relative_fixups_x86.cc | 3
-rw-r--r-- compiler/optimizing/prepare_for_register_allocation.cc | 13
-rw-r--r-- compiler/optimizing/prepare_for_register_allocation.h | 1
-rw-r--r-- compiler/optimizing/reference_type_propagation.cc | 52
-rw-r--r-- compiler/optimizing/register_allocation_resolver.cc | 8
-rw-r--r-- compiler/optimizing/register_allocator_linear_scan.cc | 4
-rw-r--r-- compiler/optimizing/register_allocator_test.cc | 20
-rw-r--r-- compiler/optimizing/scheduler.cc | 2
-rw-r--r-- compiler/optimizing/scheduler_arm.cc | 6
-rw-r--r-- compiler/optimizing/scheduler_test.cc | 2
-rw-r--r-- compiler/optimizing/sharpening.cc | 37
-rw-r--r-- compiler/optimizing/sharpening.h | 26
-rw-r--r-- compiler/optimizing/side_effects_test.cc | 26
-rw-r--r-- compiler/optimizing/ssa_builder.cc | 116
-rw-r--r-- compiler/optimizing/ssa_builder.h | 9
-rw-r--r-- compiler/optimizing/ssa_liveness_analysis.cc | 2
-rw-r--r-- compiler/optimizing/ssa_liveness_analysis.h | 10
-rw-r--r-- compiler/optimizing/ssa_liveness_analysis_test.cc | 32
-rw-r--r-- compiler/optimizing/ssa_phi_elimination.cc | 2
-rw-r--r-- compiler/optimizing/superblock_cloner.h | 4
-rw-r--r-- compiler/utils/arm/assembler_arm_vixl.cc | 2
-rw-r--r-- compiler/utils/arm64/assembler_arm64.cc | 32
-rw-r--r-- compiler/utils/arm64/assembler_arm64.h | 6
-rw-r--r-- compiler/utils/assembler.h | 2
-rw-r--r-- compiler/utils/assembler_test_base.h | 4
-rw-r--r-- compiler/utils/assembler_thumb_test.cc | 6
-rw-r--r-- compiler/utils/assembler_thumb_test_expected.cc.inc | 4
-rw-r--r-- compiler/utils/mips/assembler_mips.cc | 78
-rw-r--r-- compiler/utils/mips/assembler_mips.h | 14
-rw-r--r-- compiler/utils/mips/assembler_mips32r5_test.cc | 14
-rw-r--r-- compiler/utils/mips/assembler_mips32r6_test.cc | 82
-rw-r--r-- compiler/utils/mips/assembler_mips_test.cc | 44
-rw-r--r-- compiler/utils/mips64/assembler_mips64.cc | 56
-rw-r--r-- compiler/utils/mips64/assembler_mips64.h | 10
-rw-r--r-- compiler/utils/mips64/assembler_mips64_test.cc | 64
-rw-r--r-- compiler/utils/x86/assembler_x86.cc | 169
-rw-r--r-- compiler/utils/x86/assembler_x86.h | 10
-rw-r--r-- compiler/utils/x86/assembler_x86_test.cc | 26
-rw-r--r-- compiler/utils/x86_64/assembler_x86_64.cc | 170
-rw-r--r-- compiler/utils/x86_64/assembler_x86_64.h | 10
-rw-r--r-- compiler/utils/x86_64/assembler_x86_64_test.cc | 30
-rw-r--r-- compiler/verifier_deps_test.cc | 205
175 files changed, 5233 insertions(+), 3056 deletions(-)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index c36553779e..0ebaa5f56c 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -157,6 +157,8 @@ art_cc_defaults {
"optimizing/code_generator_x86.cc",
"optimizing/code_generator_vector_x86.cc",
"optimizing/intrinsics_x86.cc",
+ "optimizing/instruction_simplifier_x86_shared.cc",
+ "optimizing/instruction_simplifier_x86.cc",
"optimizing/pc_relative_fixups_x86.cc",
"optimizing/x86_memory_gen.cc",
"utils/x86/assembler_x86.cc",
@@ -168,6 +170,7 @@ art_cc_defaults {
srcs: [
"jni/quick/x86_64/calling_convention_x86_64.cc",
"optimizing/intrinsics_x86_64.cc",
+ "optimizing/instruction_simplifier_x86_64.cc",
"optimizing/code_generator_x86_64.cc",
"optimizing/code_generator_vector_x86_64.cc",
"utils/x86_64/assembler_x86_64.cc",
@@ -179,8 +182,6 @@ art_cc_defaults {
generated_sources: ["art_compiler_operator_srcs"],
shared_libs: [
"libbase",
- "libcutils", // for atrace.
- "liblzma",
],
include_dirs: ["art/disassembler"],
header_libs: [
@@ -191,6 +192,13 @@ art_cc_defaults {
export_include_dirs: ["."],
}
+cc_defaults {
+ name: "libart-compiler_static_base_defaults",
+ static_libs: [
+ "libbase",
+ ],
+}
+
gensrcs {
name: "art_compiler_operator_srcs",
cmd: "$(location generate_operator_out) art/compiler $(in) > $(out)",
@@ -221,12 +229,12 @@ art_cc_library {
// VIXL assembly support for ARM targets.
static: {
whole_static_libs: [
- "libvixl-arm",
+ "libvixl",
],
},
shared: {
shared_libs: [
- "libvixl-arm",
+ "libvixl",
],
},
},
@@ -234,21 +242,22 @@ art_cc_library {
// VIXL assembly support for ARM64 targets.
static: {
whole_static_libs: [
- "libvixl-arm64",
+ "libvixl",
],
},
shared: {
shared_libs: [
- "libvixl-arm64",
+ "libvixl",
],
},
},
},
shared_libs: [
"libart",
+ "libartbase",
+ "libartpalette",
"libprofile",
"libdexfile",
- "libartbase",
],
target: {
@@ -260,6 +269,18 @@ art_cc_library {
},
}
+cc_defaults {
+ name: "libart-compiler_static_defaults",
+ defaults: [
+ "libart-compiler_static_base_defaults",
+ "libart_static_defaults",
+ "libartbase_static_defaults",
+ "libdexfile_static_defaults",
+ "libprofile_static_defaults",
+ ],
+ static_libs: ["libart-compiler"],
+}
+
art_cc_library {
name: "libartd-compiler",
defaults: [
@@ -271,12 +292,12 @@ art_cc_library {
// VIXL assembly support for ARM targets.
static: {
whole_static_libs: [
- "libvixld-arm",
+ "libvixld",
],
},
shared: {
shared_libs: [
- "libvixld-arm",
+ "libvixld",
],
},
},
@@ -284,24 +305,37 @@ art_cc_library {
// VIXL assembly support for ARM64 targets.
static: {
whole_static_libs: [
- "libvixld-arm64",
+ "libvixld",
],
},
shared: {
shared_libs: [
- "libvixld-arm64",
+ "libvixld",
],
},
},
},
shared_libs: [
+ "libartbased",
"libartd",
+ "libartpalette",
"libprofiled",
"libdexfiled",
- "libartbased",
],
}
+cc_defaults {
+ name: "libartd-compiler_static_defaults",
+ defaults: [
+ "libart-compiler_static_base_defaults",
+ "libartd_static_defaults",
+ "libartbased_static_defaults",
+ "libdexfiled_static_defaults",
+ "libprofiled_static_defaults",
+ ],
+ static_libs: ["libartd-compiler"],
+}
+
art_cc_library {
name: "libart-compiler-gtest",
defaults: ["libart-gtest-defaults"],
@@ -418,8 +452,7 @@ art_cc_test {
"libprofiled",
"libartd-compiler",
"libartd-simulator-container",
- "libvixld-arm",
- "libvixld-arm64",
+ "libvixld",
"libbacktrace",
"libnativeloader",
@@ -476,7 +509,6 @@ art_cc_test {
},
shared_libs: [
"libartd-compiler",
- "libvixld-arm",
- "libvixld-arm64",
+ "libvixld",
],
}
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 581edaa773..658bdb35ae 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -53,13 +53,13 @@ class CFITest : public dwarf::DwarfTest {
dwarf::WriteCIE(is64bit, dwarf::Reg(8), initial_opcodes, kCFIFormat, &debug_frame_data_);
std::vector<uintptr_t> debug_frame_patches;
dwarf::WriteFDE(is64bit,
- /* section_address */ 0,
- /* cie_address */ 0,
- /* code_address */ 0,
+ /* section_address= */ 0,
+ /* cie_address= */ 0,
+ /* code_address= */ 0,
actual_asm.size(),
actual_cfi,
kCFIFormat,
- /* buffer_address */ 0,
+ /* buffer_address= */ 0,
&debug_frame_data_,
&debug_frame_patches);
ReformatCfi(Objdump(false, "-W"), &lines);
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 586891a3ff..07c73c9a20 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -22,6 +22,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/callee_save_type.h"
+#include "base/casts.h"
#include "base/enums.h"
#include "base/utils.h"
#include "class_linker.h"
@@ -152,6 +153,10 @@ void CommonCompilerTest::SetUp() {
CreateCompilerDriver();
}
+ // Note: We cannot use MemMap because some tests tear down the Runtime and destroy
+ // the gMaps, so when destroying the MemMap, the test would crash.
+ inaccessible_page_ = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ CHECK(inaccessible_page_ != MAP_FAILED) << strerror(errno);
}
void CommonCompilerTest::ApplyInstructionSet() {
@@ -184,17 +189,15 @@ void CommonCompilerTest::OverrideInstructionSetFeatures(InstructionSet instructi
void CommonCompilerTest::CreateCompilerDriver() {
ApplyInstructionSet();
- compiler_options_->boot_image_ = true;
+ compiler_options_->image_type_ = CompilerOptions::ImageType::kBootImage;
compiler_options_->compile_pic_ = false; // Non-PIC boot image is a test configuration.
compiler_options_->SetCompilerFilter(GetCompilerFilter());
compiler_options_->image_classes_.swap(*GetImageClasses());
+ compiler_options_->profile_compilation_info_ = GetProfileCompilationInfo();
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
- verification_results_.get(),
compiler_kind_,
- &compiler_options_->image_classes_,
number_of_threads_,
- /* swap_fd */ -1,
- GetProfileCompilationInfo()));
+ /* swap_fd= */ -1));
}
void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) {
@@ -222,6 +225,10 @@ void CommonCompilerTest::TearDown() {
verification_results_.reset();
compiler_options_.reset();
image_reservation_.Reset();
+ if (inaccessible_page_ != nullptr) {
+ munmap(inaccessible_page_, kPageSize);
+ inaccessible_page_ = nullptr;
+ }
CommonRuntimeTest::TearDown();
}
@@ -257,7 +264,7 @@ void CommonCompilerTest::CompileMethod(ArtMethod* method) {
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader = hs.NewHandle(
self->DecodeJObject(class_loader)->AsClassLoader());
- const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+ const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
std::vector<const DexFile*> dex_files;
dex_files.push_back(dex_file);
@@ -267,8 +274,16 @@ void CommonCompilerTest::CompileMethod(ArtMethod* method) {
compiler_driver_->InitializeThreadPools();
- compiler_driver_->PreCompile(class_loader, dex_files, &timings);
+ compiler_driver_->PreCompile(class_loader,
+ dex_files,
+ &timings,
+ &compiler_options_->image_classes_,
+ verification_results_.get());
+ // Verification results in the `callback_` should not be used during compilation.
+ down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+ reinterpret_cast<VerificationResults*>(inaccessible_page_));
+ compiler_options_->verification_results_ = verification_results_.get();
compiler_driver_->CompileOne(self,
class_loader,
*dex_file,
@@ -279,6 +294,9 @@ void CommonCompilerTest::CompileMethod(ArtMethod* method) {
code_item,
dex_cache,
h_class_loader);
+ compiler_options_->verification_results_ = nullptr;
+ down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+ verification_results_.get());
compiler_driver_->FreeThreadPools();
@@ -328,10 +346,38 @@ void CommonCompilerTest::ReserveImageSpace() {
(size_t)120 * 1024 * 1024, // 120MB
PROT_NONE,
false /* no need for 4gb flag with fixed mmap */,
+ /*reuse=*/ false,
+ /*reservation=*/ nullptr,
&error_msg);
CHECK(image_reservation_.IsValid()) << error_msg;
}
+void CommonCompilerTest::CompileAll(jobject class_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, timings);
+ SetDexFilesForOatFile(dex_files);
+
+ compiler_driver_->InitializeThreadPools();
+
+ compiler_driver_->PreCompile(class_loader,
+ dex_files,
+ timings,
+ &compiler_options_->image_classes_,
+ verification_results_.get());
+
+ // Verification results in the `callback_` should not be used during compilation.
+ down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+ reinterpret_cast<VerificationResults*>(inaccessible_page_));
+ compiler_options_->verification_results_ = verification_results_.get();
+ compiler_driver_->CompileAll(class_loader, dex_files, timings);
+ compiler_options_->verification_results_ = nullptr;
+ down_cast<QuickCompilerCallbacks*>(callbacks_.get())->SetVerificationResults(
+ verification_results_.get());
+
+ compiler_driver_->FreeThreadPools();
+}
+
void CommonCompilerTest::UnreserveImageSpace() {
image_reservation_.Reset();
}
@@ -343,7 +389,7 @@ void CommonCompilerTest::SetDexFilesForOatFile(const std::vector<const DexFile*>
}
void CommonCompilerTest::ClearBootImageOption() {
- compiler_options_->boot_image_ = false;
+ compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
}
} // namespace art
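The test fixture above guards against stray use of verification results during compilation by aliasing the pointer to a PROT_NONE mapping, so any dereference faults immediately. A minimal standalone sketch of that guard-page trick, assuming a 4096-byte page and a stand-in type (not the real ART class):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstdio>

    struct VerificationResults;  // Stand-in for the real ART type.

    int main() {
      // One inaccessible page: reads or writes through it crash at once,
      // mirroring the inaccessible_page_ used in SetUp()/TearDown() above.
      void* poison = mmap(nullptr, 4096, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(poison != MAP_FAILED);
      auto* poisoned = reinterpret_cast<VerificationResults*>(poison);
      std::printf("poisoned pointer: %p\n", static_cast<void*>(poisoned));
      // Hand `poisoned` to the callbacks while compiling, restore the real
      // pointer afterwards, then unmap.
      munmap(poison, 4096);
      return 0;
    }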
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index e6d1564621..a71908e6c8 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -20,6 +20,8 @@
#include <list>
#include <vector>
+#include <jni.h>
+
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "base/hash_set.h"
@@ -37,6 +39,7 @@ class CompilerOptions;
class CumulativeLogger;
class DexFile;
class ProfileCompilationInfo;
+class TimingLogger;
class VerificationResults;
template<class T> class Handle;
@@ -88,6 +91,10 @@ class CommonCompilerTest : public CommonRuntimeTest {
const char* method_name, const char* signature)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void CompileAll(jobject class_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
+
void ApplyInstructionSet();
void OverrideInstructionSetFeatures(InstructionSet instruction_set, const std::string& variant);
@@ -116,6 +123,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
private:
MemMap image_reservation_;
+ void* inaccessible_page_;
// Chunks must not move their storage after being created - use the node-based std::list.
std::list<std::vector<uint8_t>> header_code_and_maps_chunks_;
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 29f004cf87..58f7e4f227 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -17,21 +17,20 @@
#include "compiled_method.h"
#include "driver/compiled_method_storage.h"
-#include "driver/compiler_driver.h"
#include "utils/swap_space.h"
namespace art {
-CompiledCode::CompiledCode(CompilerDriver* compiler_driver,
+CompiledCode::CompiledCode(CompiledMethodStorage* storage,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code)
- : compiler_driver_(compiler_driver),
- quick_code_(compiler_driver_->GetCompiledMethodStorage()->DeduplicateCode(quick_code)),
+ : storage_(storage),
+ quick_code_(storage->DeduplicateCode(quick_code)),
packed_fields_(InstructionSetField::Encode(instruction_set)) {
}
CompiledCode::~CompiledCode() {
- compiler_driver_->GetCompiledMethodStorage()->ReleaseCode(quick_code_);
+ GetStorage()->ReleaseCode(quick_code_);
}
bool CompiledCode::operator==(const CompiledCode& rhs) const {
@@ -74,7 +73,7 @@ size_t CompiledCode::CodeDelta(InstructionSet instruction_set) {
}
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return 0;
+ UNREACHABLE();
}
}
@@ -95,33 +94,33 @@ const void* CompiledCode::CodePointer(const void* code_pointer, InstructionSet i
}
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return nullptr;
+ UNREACHABLE();
}
}
-CompiledMethod::CompiledMethod(CompilerDriver* driver,
+CompiledMethod::CompiledMethod(CompiledMethodStorage* storage,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const linker::LinkerPatch>& patches)
- : CompiledCode(driver, instruction_set, quick_code),
- vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)),
- cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)),
- patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) {
+ : CompiledCode(storage, instruction_set, quick_code),
+ vmap_table_(storage->DeduplicateVMapTable(vmap_table)),
+ cfi_info_(storage->DeduplicateCFIInfo(cfi_info)),
+ patches_(storage->DeduplicateLinkerPatches(patches)) {
}
CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
- CompilerDriver* driver,
+ CompiledMethodStorage* storage,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const linker::LinkerPatch>& patches) {
- SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator());
+ SwapAllocator<CompiledMethod> alloc(storage->GetSwapSpaceAllocator());
CompiledMethod* ret = alloc.allocate(1);
alloc.construct(ret,
- driver,
+ storage,
instruction_set,
quick_code,
vmap_table,
@@ -129,14 +128,15 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
return ret;
}
-void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m) {
- SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator());
+void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompiledMethodStorage* storage,
+ CompiledMethod* m) {
+ SwapAllocator<CompiledMethod> alloc(storage->GetSwapSpaceAllocator());
alloc.destroy(m);
alloc.deallocate(m, 1);
}
CompiledMethod::~CompiledMethod() {
- CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage();
+ CompiledMethodStorage* storage = GetStorage();
storage->ReleaseLinkerPatches(patches_);
storage->ReleaseCFIInfo(cfi_info_);
storage->ReleaseVMapTable(vmap_table_);
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 864ce585cf..e92777ff12 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -28,7 +28,6 @@
namespace art {
template <typename T> class ArrayRef;
-class CompilerDriver;
class CompiledMethodStorage;
template<typename T> class LengthPrefixedArray;
@@ -39,7 +38,7 @@ class LinkerPatch;
class CompiledCode {
public:
// For Quick to supply an code blob
- CompiledCode(CompilerDriver* compiler_driver,
+ CompiledCode(CompiledMethodStorage* storage,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code);
@@ -78,8 +77,8 @@ class CompiledCode {
template <typename T>
static ArrayRef<const T> GetArray(const LengthPrefixedArray<T>* array);
- CompilerDriver* GetCompilerDriver() {
- return compiler_driver_;
+ CompiledMethodStorage* GetStorage() {
+ return storage_;
}
template <typename BitFieldType>
@@ -96,7 +95,7 @@ class CompiledCode {
private:
using InstructionSetField = BitField<InstructionSet, 0u, kInstructionSetFieldSize>;
- CompilerDriver* const compiler_driver_;
+ CompiledMethodStorage* const storage_;
// Used to store the compiled code.
const LengthPrefixedArray<uint8_t>* const quick_code_;
@@ -109,7 +108,7 @@ class CompiledMethod final : public CompiledCode {
// Constructs a CompiledMethod.
// Note: Consider using the static allocation methods below that will allocate the CompiledMethod
// in the swap space.
- CompiledMethod(CompilerDriver* driver,
+ CompiledMethod(CompiledMethodStorage* storage,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code,
const ArrayRef<const uint8_t>& vmap_table,
@@ -119,14 +118,14 @@ class CompiledMethod final : public CompiledCode {
virtual ~CompiledMethod();
static CompiledMethod* SwapAllocCompiledMethod(
- CompilerDriver* driver,
+ CompiledMethodStorage* storage,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const linker::LinkerPatch>& patches);
- static void ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m);
+ static void ReleaseSwapAllocatedCompiledMethod(CompiledMethodStorage* storage, CompiledMethod* m);
bool IsIntrinsic() const {
return GetPackedField<IsIntrinsicField>();
@@ -137,7 +136,7 @@ class CompiledMethod final : public CompiledCode {
// This affects debug information generated at link time.
void MarkAsIntrinsic() {
DCHECK(!IsIntrinsic());
- SetPackedField<IsIntrinsicField>(/* value */ true);
+ SetPackedField<IsIntrinsicField>(/* value= */ true);
}
ArrayRef<const uint8_t> GetVmapTable() const;
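Since CompiledCode and CompiledMethod now hold a CompiledMethodStorage* instead of a CompilerDriver*, callers pass the storage directly. A hedged sketch of the new allocate/release pattern; `driver`, `quick_code`, `vmap_table`, `cfi_info` and `patches` are assumed to exist in the caller (dex_to_dex_compiler.cc below shows the same call with real arguments):

    // Sketch only: allocation now keys off the storage, not the driver.
    CompiledMethodStorage* storage = driver->GetCompiledMethodStorage();
    CompiledMethod* method = CompiledMethod::SwapAllocCompiledMethod(
        storage,
        InstructionSet::kArm64,
        ArrayRef<const uint8_t>(quick_code),
        ArrayRef<const uint8_t>(vmap_table),
        ArrayRef<const uint8_t>(cfi_info),
        ArrayRef<const linker::LinkerPatch>(patches));
    // ... emit/link the method ...
    CompiledMethod::ReleaseSwapAllocatedCompiledMethod(storage, method);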
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index 646040fd9d..54da446e6d 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -21,6 +21,7 @@
#include "base/macros.h"
#include "base/utils.h"
#include "dex/code_item_accessors-inl.h"
+#include "dex/dex_file.h"
#include "driver/compiler_driver.h"
#include "optimizing/optimizing_compiler.h"
@@ -39,7 +40,7 @@ Compiler* Compiler::Create(CompilerDriver* driver, Compiler::Kind kind) {
}
}
-bool Compiler::IsPathologicalCase(const DexFile::CodeItem& code_item,
+bool Compiler::IsPathologicalCase(const dex::CodeItem& code_item,
uint32_t method_idx,
const DexFile& dex_file) {
/*
diff --git a/compiler/compiler.h b/compiler/compiler.h
index ef3d87f02b..8a67724de0 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -19,10 +19,13 @@
#include "base/mutex.h"
#include "base/os.h"
-#include "dex/dex_file.h"
+#include "dex/invoke_type.h"
namespace art {
+namespace dex {
+struct CodeItem;
+} // namespace dex
namespace jit {
class JitCodeCache;
class JitLogger;
@@ -35,6 +38,7 @@ class DexCache;
class ArtMethod;
class CompilerDriver;
class CompiledMethod;
+class DexFile;
template<class T> class Handle;
class OatWriter;
class Thread;
@@ -54,7 +58,7 @@ class Compiler {
virtual bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const = 0;
- virtual CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+ virtual CompiledMethod* Compile(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -71,6 +75,7 @@ class Compiler {
virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
+ bool baseline ATTRIBUTE_UNUSED,
bool osr ATTRIBUTE_UNUSED,
jit::JitLogger* jit_logger ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -101,7 +106,7 @@ class Compiler {
// Returns whether the method to compile is such a pathological case that
// it's not worth compiling.
- static bool IsPathologicalCase(const DexFile::CodeItem& code_item,
+ static bool IsPathologicalCase(const dex::CodeItem& code_item,
uint32_t method_idx,
const DexFile& dex_file);
diff --git a/compiler/debug/dwarf/dwarf_test.cc b/compiler/debug/dwarf/dwarf_test.cc
index 933034f593..6512314ae8 100644
--- a/compiler/debug/dwarf/dwarf_test.cc
+++ b/compiler/debug/dwarf/dwarf_test.cc
@@ -334,7 +334,7 @@ TEST_F(DwarfTest, DebugInfo) {
std::vector<uintptr_t> debug_info_patches;
std::vector<uintptr_t> expected_patches = { 16, 20, 29, 33, 42, 46 };
- dwarf::WriteDebugInfoCU(0 /* debug_abbrev_offset */, info,
+ dwarf::WriteDebugInfoCU(/* debug_abbrev_offset= */ 0, info,
0, &debug_info_data_, &debug_info_patches);
EXPECT_EQ(expected_patches, debug_info_patches);
diff --git a/compiler/debug/dwarf/headers.h b/compiler/debug/dwarf/headers.h
index 28f108423e..4a27178146 100644
--- a/compiler/debug/dwarf/headers.h
+++ b/compiler/debug/dwarf/headers.h
@@ -107,7 +107,9 @@ void WriteFDE(bool is64bit,
} else {
DCHECK(format == DW_DEBUG_FRAME_FORMAT);
// Relocate code_address if it has absolute value.
- patch_locations->push_back(buffer_address + buffer->size() - section_address);
+ if (patch_locations != nullptr) {
+ patch_locations->push_back(buffer_address + buffer->size() - section_address);
+ }
}
if (is64bit) {
writer.PushUint64(code_address);
@@ -122,6 +124,30 @@ void WriteFDE(bool is64bit,
writer.UpdateUint32(fde_header_start, writer.data()->size() - fde_header_start - 4);
}
+// Read single FDE entry from 'data' (which is advanced).
+template<typename Addr>
+bool ReadFDE(const uint8_t** data, Addr* addr, Addr* size, ArrayRef<const uint8_t>* opcodes) {
+ struct Header {
+ uint32_t length;
+ int32_t cie_pointer;
+ Addr addr;
+ Addr size;
+ uint8_t augmentaion;
+ uint8_t opcodes[];
+ } PACKED(1);
+ const Header* header = reinterpret_cast<const Header*>(*data);
+ const size_t length = 4 + header->length;
+ *data += length;
+ if (header->cie_pointer == -1) {
+ return false; // Not an FDE entry.
+ }
+ DCHECK_EQ(header->cie_pointer, 0); // Expects single CIE. Assumes DW_DEBUG_FRAME_FORMAT.
+ *addr = header->addr;
+ *size = header->size;
+ *opcodes = ArrayRef<const uint8_t>(header->opcodes, length - offsetof(Header, opcodes));
+ return true;
+}
+
// Write compilation unit (CU) to .debug_info section.
template<typename Vector>
void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
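The new ReadFDE is the read-side counterpart of WriteFDE: it parses one DW_DEBUG_FRAME_FORMAT entry, advances the cursor, and returns false for the CIE. A hedged sketch of the consuming loop, assuming 32-bit addresses and a `section` byte buffer (elf_debug_reader.h below uses the same loop over .debug_frame):

    // Sketch: walk every FDE in a .debug_frame buffer.
    const uint8_t* data = section.data();
    const uint8_t* end = data + section.size();
    while (data < end) {
      uint32_t addr = 0;
      uint32_t size = 0;
      ArrayRef<const uint8_t> opcodes;
      // ReadFDE advances `data` past the entry and skips the single CIE.
      if (dwarf::ReadFDE<uint32_t>(&data, &addr, &size, &opcodes)) {
        // [addr, addr + size) is the covered code range; `opcodes` holds raw CFI.
      }
    }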
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index bda7108c74..a63f241f53 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -41,26 +41,14 @@
namespace art {
namespace debug {
-typedef std::vector<DexFile::LocalInfo> LocalInfos;
-
-static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) {
- static_cast<LocalInfos*>(ctx)->push_back(entry);
-}
-
static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
std::vector<const char*> names;
+ DCHECK(mi->dex_file != nullptr);
CodeItemDebugInfoAccessor accessor(*mi->dex_file, mi->code_item, mi->dex_method_index);
if (accessor.HasCodeItem()) {
- DCHECK(mi->dex_file != nullptr);
- const uint8_t* stream = mi->dex_file->GetDebugInfoStream(accessor.DebugInfoOffset());
- if (stream != nullptr) {
- DecodeUnsignedLeb128(&stream); // line.
- uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
- for (uint32_t i = 0; i < parameters_size; ++i) {
- uint32_t id = DecodeUnsignedLeb128P1(&stream);
- names.push_back(mi->dex_file->StringDataByIdx(dex::StringIndex(id)));
- }
- }
+ accessor.VisitParameterNames([&](const dex::StringIndex& id) {
+ names.push_back(mi->dex_file->StringDataByIdx(id));
+ });
}
return names;
}
@@ -164,9 +152,9 @@ class ElfCompilationUnitWriter {
DCHECK(mi->dex_file != nullptr);
const DexFile* dex = mi->dex_file;
CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
- const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
- const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
- const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
+ const dex::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
+ const dex::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
+ const dex::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
const char* dex_class_desc = dex->GetMethodDeclaringClassDescriptor(dex_method);
const bool is_static = (mi->access_flags & kAccStatic) != 0;
@@ -257,11 +245,12 @@ class ElfCompilationUnitWriter {
}
// Write local variables.
- LocalInfos local_infos;
+ std::vector<DexFile::LocalInfo> local_infos;
if (accessor.DecodeDebugLocalInfo(is_static,
mi->dex_method_index,
- LocalInfoCallback,
- &local_infos)) {
+ [&](const DexFile::LocalInfo& entry) {
+ local_infos.push_back(entry);
+ })) {
for (const DexFile::LocalInfo& var : local_infos) {
if (var.reg_ < accessor.RegistersSize() - accessor.InsSize()) {
info_.StartTag(DW_TAG_variable);
@@ -383,10 +372,10 @@ class ElfCompilationUnitWriter {
}
// Base class.
- mirror::Class* base_class = type->GetSuperClass();
+ ObjPtr<mirror::Class> base_class = type->GetSuperClass();
if (base_class != nullptr) {
info_.StartTag(DW_TAG_inheritance);
- base_class_references.emplace(info_.size(), base_class);
+ base_class_references.emplace(info_.size(), base_class.Ptr());
info_.WriteRef4(DW_AT_type, 0);
info_.WriteUdata(DW_AT_data_member_location, 0);
info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_public);
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 3d78943cd0..0a13a92d07 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -34,11 +34,6 @@ namespace debug {
typedef std::vector<DexFile::PositionInfo> PositionInfos;
-static bool PositionInfoCallback(void* ctx, const DexFile::PositionInfo& entry) {
- static_cast<PositionInfos*>(ctx)->push_back(entry);
- return false;
-}
-
template<typename ElfTypes>
class ElfDebugLineWriter {
using Elf_Addr = typename ElfTypes::Addr;
@@ -154,11 +149,14 @@ class ElfDebugLineWriter {
Elf_Addr method_address = base_address + mi->code_address;
PositionInfos dex2line_map;
- DCHECK(mi->dex_file != nullptr);
const DexFile* dex = mi->dex_file;
+ DCHECK(dex != nullptr);
CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
- const uint32_t debug_info_offset = accessor.DebugInfoOffset();
- if (!dex->DecodeDebugPositionInfo(debug_info_offset, PositionInfoCallback, &dex2line_map)) {
+ if (!accessor.DecodeDebugPositionInfo(
+ [&](const DexFile::PositionInfo& entry) {
+ dex2line_map.push_back(entry);
+ return false;
+ })) {
continue;
}
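The line writer now decodes dex positions through a lambda instead of a C-style callback plus void* context. A hedged sketch of the new call shape, with the accessor construction taken from the hunk above and a plain vector as the sink:

    // Sketch: lambda-based decoding replaces PositionInfoCallback + ctx pointer.
    std::vector<DexFile::PositionInfo> dex2line_map;
    CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
    bool ok = accessor.DecodeDebugPositionInfo(
        [&](const DexFile::PositionInfo& entry) {
          dex2line_map.push_back(entry);
          return false;  // Keep iterating over all position entries.
        });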
diff --git a/compiler/debug/elf_debug_reader.h b/compiler/debug/elf_debug_reader.h
new file mode 100644
index 0000000000..91b1b3ea81
--- /dev/null
+++ b/compiler/debug/elf_debug_reader.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
+#define ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
+
+#include "base/array_ref.h"
+#include "debug/dwarf/headers.h"
+#include "elf.h"
+#include "xz_utils.h"
+
+namespace art {
+namespace debug {
+
+// Trivial ELF file reader.
+//
+// It is the bare minimum needed to read mini-debug-info symbols for unwinding.
+// We use it to merge JIT mini-debug-infos together or to prune them after GC.
+// The consumed ELF file comes from ART JIT.
+template <typename ElfTypes, typename VisitSym, typename VisitFde>
+static void ReadElfSymbols(const uint8_t* elf, VisitSym visit_sym, VisitFde visit_fde) {
+ // Note that the input buffer might be misaligned.
+ typedef typename ElfTypes::Ehdr ALIGNED(1) Elf_Ehdr;
+ typedef typename ElfTypes::Shdr ALIGNED(1) Elf_Shdr;
+ typedef typename ElfTypes::Sym ALIGNED(1) Elf_Sym;
+ typedef typename ElfTypes::Addr ALIGNED(1) Elf_Addr;
+
+ // Read and check the elf header.
+ const Elf_Ehdr* header = reinterpret_cast<const Elf_Ehdr*>(elf);
+ CHECK(header->checkMagic());
+
+ // Find sections that we are interested in.
+ const Elf_Shdr* sections = reinterpret_cast<const Elf_Shdr*>(elf + header->e_shoff);
+ const Elf_Shdr* strtab = nullptr;
+ const Elf_Shdr* symtab = nullptr;
+ const Elf_Shdr* debug_frame = nullptr;
+ const Elf_Shdr* gnu_debugdata = nullptr;
+ for (size_t i = 1 /* skip null section */; i < header->e_shnum; i++) {
+ const Elf_Shdr* section = sections + i;
+ const char* name = reinterpret_cast<const char*>(
+ elf + sections[header->e_shstrndx].sh_offset + section->sh_name);
+ if (strcmp(name, ".strtab") == 0) {
+ strtab = section;
+ } else if (strcmp(name, ".symtab") == 0) {
+ symtab = section;
+ } else if (strcmp(name, ".debug_frame") == 0) {
+ debug_frame = section;
+ } else if (strcmp(name, ".gnu_debugdata") == 0) {
+ gnu_debugdata = section;
+ }
+ }
+
+ // Visit symbols.
+ if (symtab != nullptr && strtab != nullptr) {
+ const Elf_Sym* symbols = reinterpret_cast<const Elf_Sym*>(elf + symtab->sh_offset);
+ DCHECK_EQ(symtab->sh_entsize, sizeof(Elf_Sym));
+ size_t count = symtab->sh_size / sizeof(Elf_Sym);
+ for (size_t i = 1 /* skip null symbol */; i < count; i++) {
+ Elf_Sym symbol = symbols[i];
+ if (symbol.getBinding() != STB_LOCAL) { // Ignore local symbols (e.g. "$t").
+ const uint8_t* name = elf + strtab->sh_offset + symbol.st_name;
+ visit_sym(symbol, reinterpret_cast<const char*>(name));
+ }
+ }
+ }
+
+ // Visit CFI (unwind) data.
+ if (debug_frame != nullptr) {
+ const uint8_t* data = elf + debug_frame->sh_offset;
+ const uint8_t* end = data + debug_frame->sh_size;
+ while (data < end) {
+ Elf_Addr addr, size;
+ ArrayRef<const uint8_t> opcodes;
+ if (dwarf::ReadFDE<Elf_Addr>(&data, &addr, &size, &opcodes)) {
+ visit_fde(addr, size, opcodes);
+ }
+ }
+ }
+
+ // Process embedded compressed ELF file.
+ if (gnu_debugdata != nullptr) {
+ ArrayRef<const uint8_t> compressed(elf + gnu_debugdata->sh_offset, gnu_debugdata->sh_size);
+ std::vector<uint8_t> decompressed;
+ XzDecompress(compressed, &decompressed);
+ ReadElfSymbols<ElfTypes>(decompressed.data(), visit_sym, visit_fde);
+ }
+}
+
+} // namespace debug
+} // namespace art
+#endif // ART_COMPILER_DEBUG_ELF_DEBUG_READER_H_
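A hedged sketch of driving the new reader: the first lambda receives each non-local symbol, the second each unwind (FDE) entry. The `elf_image` pointer and the logging are assumptions; the debug-build verification added to elf_debug_writer.cc below uses the same shape:

    // Sketch: enumerate symbols and CFI entries of a JIT mini-debug-info image.
    using JitElfTypes = ElfTypes32;  // Or ElfTypes64, matching the runtime pointer size.
    debug::ReadElfSymbols<JitElfTypes>(
        elf_image,
        [](JitElfTypes::Sym sym, const char* name) {
          LOG(INFO) << "sym " << name << " value=" << sym.st_value
                    << " size=" << sym.st_size;
        },
        [](JitElfTypes::Addr addr, JitElfTypes::Addr size, ArrayRef<const uint8_t> opcodes) {
          LOG(INFO) << "fde addr=" << addr << " size=" << size
                    << " cfi_bytes=" << opcodes.size();
        });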
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 71422d48a5..e5c09aa379 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -16,19 +16,23 @@
#include "elf_debug_writer.h"
-#include <vector>
+#include <type_traits>
#include <unordered_map>
+#include <vector>
#include "base/array_ref.h"
+#include "base/stl_util.h"
#include "debug/dwarf/dwarf_constants.h"
#include "debug/elf_compilation_unit.h"
#include "debug/elf_debug_frame_writer.h"
#include "debug/elf_debug_info_writer.h"
#include "debug/elf_debug_line_writer.h"
#include "debug/elf_debug_loc_writer.h"
+#include "debug/elf_debug_reader.h"
#include "debug/elf_symtab_writer.h"
#include "debug/method_debug_info.h"
#include "debug/xz_utils.h"
+#include "elf.h"
#include "linker/elf_builder.h"
#include "linker/vector_output_stream.h"
#include "oat.h"
@@ -36,19 +40,21 @@
namespace art {
namespace debug {
+using ElfRuntimeTypes = std::conditional<sizeof(void*) == 4, ElfTypes32, ElfTypes64>::type;
+
template <typename ElfTypes>
void WriteDebugInfo(linker::ElfBuilder<ElfTypes>* builder,
const DebugInfo& debug_info,
dwarf::CFIFormat cfi_format,
bool write_oat_patches) {
// Write .strtab and .symtab.
- WriteDebugSymbols(builder, false /* mini-debug-info */, debug_info);
+ WriteDebugSymbols(builder, /* mini-debug-info= */ false, debug_info);
// Write .debug_frame.
WriteCFISection(builder, debug_info.compiled_methods, cfi_format, write_oat_patches);
// Group the methods into compilation units based on class.
- std::unordered_map<const DexFile::ClassDef*, ElfCompilationUnit> class_to_compilation_unit;
+ std::unordered_map<const dex::ClassDef*, ElfCompilationUnit> class_to_compilation_unit;
for (const MethodDebugInfo& mi : debug_info.compiled_methods) {
if (mi.dex_file != nullptr) {
auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index);
@@ -119,22 +125,28 @@ static std::vector<uint8_t> MakeMiniDebugInfoInternal(
linker::VectorOutputStream out("Mini-debug-info ELF file", &buffer);
std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
new linker::ElfBuilder<ElfTypes>(isa, features, &out));
- builder->Start(false /* write_program_headers */);
+ builder->Start(/* write_program_headers= */ false);
// Mirror ELF sections as NOBITS since the added symbols will reference them.
- builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size);
+ if (text_section_size != 0) {
+ builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size);
+ }
if (dex_section_size != 0) {
builder->GetDex()->AllocateVirtualMemory(dex_section_address, dex_section_size);
}
- WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
- WriteCFISection(builder.get(),
- debug_info.compiled_methods,
- dwarf::DW_DEBUG_FRAME_FORMAT,
- false /* write_oat_paches */);
+ if (!debug_info.Empty()) {
+ WriteDebugSymbols(builder.get(), /* mini-debug-info= */ true, debug_info);
+ }
+ if (!debug_info.compiled_methods.empty()) {
+ WriteCFISection(builder.get(),
+ debug_info.compiled_methods,
+ dwarf::DW_DEBUG_FRAME_FORMAT,
+ /* write_oat_patches= */ false);
+ }
builder->End();
CHECK(builder->Good());
std::vector<uint8_t> compressed_buffer;
compressed_buffer.reserve(buffer.size() / 4);
- XzCompress(ArrayRef<uint8_t>(buffer), &compressed_buffer);
+ XzCompress(ArrayRef<const uint8_t>(buffer), &compressed_buffer);
return compressed_buffer;
}
@@ -165,107 +177,207 @@ std::vector<uint8_t> MakeMiniDebugInfo(
}
}
-template <typename ElfTypes>
-static std::vector<uint8_t> MakeElfFileForJITInternal(
+std::vector<uint8_t> MakeElfFileForJIT(
InstructionSet isa,
const InstructionSetFeatures* features,
bool mini_debug_info,
- ArrayRef<const MethodDebugInfo> method_infos) {
- CHECK_GT(method_infos.size(), 0u);
- uint64_t min_address = std::numeric_limits<uint64_t>::max();
- uint64_t max_address = 0;
- for (const MethodDebugInfo& mi : method_infos) {
- CHECK_EQ(mi.is_code_address_text_relative, false);
- min_address = std::min(min_address, mi.code_address);
- max_address = std::max(max_address, mi.code_address + mi.code_size);
- }
+ const MethodDebugInfo& method_info) {
+ using ElfTypes = ElfRuntimeTypes;
+ CHECK_EQ(sizeof(ElfTypes::Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+ CHECK_EQ(method_info.is_code_address_text_relative, false);
DebugInfo debug_info{};
- debug_info.compiled_methods = method_infos;
+ debug_info.compiled_methods = ArrayRef<const MethodDebugInfo>(&method_info, 1);
std::vector<uint8_t> buffer;
buffer.reserve(KB);
linker::VectorOutputStream out("Debug ELF file", &buffer);
std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
new linker::ElfBuilder<ElfTypes>(isa, features, &out));
// No program headers since the ELF file is not linked and has no allocated sections.
- builder->Start(false /* write_program_headers */);
+ builder->Start(/* write_program_headers= */ false);
+ builder->GetText()->AllocateVirtualMemory(method_info.code_address, method_info.code_size);
if (mini_debug_info) {
- if (method_infos.size() > 1) {
- std::vector<uint8_t> mdi = MakeMiniDebugInfo(isa,
- features,
- min_address,
- max_address - min_address,
- /* dex_section_address */ 0,
- /* dex_section_size */ 0,
- debug_info);
- builder->WriteSection(".gnu_debugdata", &mdi);
- } else {
- // The compression is great help for multiple methods but it is not worth it for a
- // single method due to the overheads so skip the compression here for performance.
- builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
- WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
- WriteCFISection(builder.get(),
- debug_info.compiled_methods,
- dwarf::DW_DEBUG_FRAME_FORMAT,
- false /* write_oat_paches */);
- }
+ // The compression is great help for multiple methods but it is not worth it for a
+ // single method due to the overheads so skip the compression here for performance.
+ WriteDebugSymbols(builder.get(), /* mini-debug-info= */ true, debug_info);
+ WriteCFISection(builder.get(),
+ debug_info.compiled_methods,
+ dwarf::DW_DEBUG_FRAME_FORMAT,
+ /* write_oat_patches= */ false);
} else {
- builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
WriteDebugInfo(builder.get(),
debug_info,
dwarf::DW_DEBUG_FRAME_FORMAT,
- false /* write_oat_patches */);
+ /* write_oat_patches= */ false);
}
builder->End();
CHECK(builder->Good());
+ // Verify the ELF file by reading it back using the trivial reader.
+ if (kIsDebugBuild) {
+ using Elf_Sym = typename ElfTypes::Sym;
+ using Elf_Addr = typename ElfTypes::Addr;
+ size_t num_syms = 0;
+ size_t num_cfis = 0;
+ ReadElfSymbols<ElfTypes>(
+ buffer.data(),
+ [&](Elf_Sym sym, const char*) {
+ DCHECK_EQ(sym.st_value, method_info.code_address + CompiledMethod::CodeDelta(isa));
+ DCHECK_EQ(sym.st_size, method_info.code_size);
+ num_syms++;
+ },
+ [&](Elf_Addr addr, Elf_Addr size, ArrayRef<const uint8_t> opcodes) {
+ DCHECK_EQ(addr, method_info.code_address);
+ DCHECK_EQ(size, method_info.code_size);
+ DCHECK_GE(opcodes.size(), method_info.cfi.size());
+ DCHECK_EQ(memcmp(opcodes.data(), method_info.cfi.data(), method_info.cfi.size()), 0);
+ num_cfis++;
+ });
+ DCHECK_EQ(num_syms, 1u);
+ // CFI might be missing. TODO: Ensure we have CFI for all methods.
+ DCHECK_LE(num_cfis, 1u);
+ }
return buffer;
}
-std::vector<uint8_t> MakeElfFileForJIT(
+// Combine several mini-debug-info ELF files into one, while filtering some symbols.
+std::vector<uint8_t> PackElfFileForJIT(
InstructionSet isa,
const InstructionSetFeatures* features,
- bool mini_debug_info,
- ArrayRef<const MethodDebugInfo> method_infos) {
- if (Is64BitInstructionSet(isa)) {
- return MakeElfFileForJITInternal<ElfTypes64>(isa, features, mini_debug_info, method_infos);
- } else {
- return MakeElfFileForJITInternal<ElfTypes32>(isa, features, mini_debug_info, method_infos);
+ std::vector<const uint8_t*>& added_elf_files,
+ std::vector<const void*>& removed_symbols,
+ /*out*/ size_t* num_symbols) {
+ using ElfTypes = ElfRuntimeTypes;
+ using Elf_Addr = typename ElfTypes::Addr;
+ using Elf_Sym = typename ElfTypes::Sym;
+ CHECK_EQ(sizeof(Elf_Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+ const bool is64bit = Is64BitInstructionSet(isa);
+ auto is_removed_symbol = [&removed_symbols](Elf_Addr addr) {
+ const void* code_ptr = reinterpret_cast<const void*>(addr);
+ return std::binary_search(removed_symbols.begin(), removed_symbols.end(), code_ptr);
+ };
+ uint64_t min_address = std::numeric_limits<uint64_t>::max();
+ uint64_t max_address = 0;
+
+ // Produce the inner ELF file.
+ // It will contain the symbols (.symtab) and unwind information (.debug_frame).
+ std::vector<uint8_t> inner_elf_file;
+ {
+ inner_elf_file.reserve(1 * KB); // Approximate size of ELF file with a single symbol.
+ linker::VectorOutputStream out("Mini-debug-info ELF file for JIT", &inner_elf_file);
+ std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
+ new linker::ElfBuilder<ElfTypes>(isa, features, &out));
+ builder->Start(/*write_program_headers=*/ false);
+ auto* text = builder->GetText();
+ auto* strtab = builder->GetStrTab();
+ auto* symtab = builder->GetSymTab();
+ auto* debug_frame = builder->GetDebugFrame();
+ std::deque<Elf_Sym> symbols;
+ std::vector<uint8_t> debug_frame_buffer;
+ WriteCIE(isa, dwarf::DW_DEBUG_FRAME_FORMAT, &debug_frame_buffer);
+
+ // Write symbols names. All other data is buffered.
+ strtab->Start();
+ strtab->Write(""); // strtab should start with empty string.
+ for (const uint8_t* added_elf_file : added_elf_files) {
+ ReadElfSymbols<ElfTypes>(
+ added_elf_file,
+ [&](Elf_Sym sym, const char* name) {
+ if (is_removed_symbol(sym.st_value)) {
+ return;
+ }
+ sym.st_name = strtab->Write(name);
+ symbols.push_back(sym);
+ min_address = std::min<uint64_t>(min_address, sym.st_value);
+ max_address = std::max<uint64_t>(max_address, sym.st_value + sym.st_size);
+ },
+ [&](Elf_Addr addr, Elf_Addr size, ArrayRef<const uint8_t> opcodes) {
+ if (is_removed_symbol(addr)) {
+ return;
+ }
+ WriteFDE(is64bit,
+ /*section_address=*/ 0,
+ /*cie_address=*/ 0,
+ addr,
+ size,
+ opcodes,
+ dwarf::DW_DEBUG_FRAME_FORMAT,
+ debug_frame_buffer.size(),
+ &debug_frame_buffer,
+ /*patch_locations=*/ nullptr);
+ });
+ }
+ strtab->End();
+
+ // Create .text covering the code range. Needed for gdb to find the symbols.
+ if (max_address > min_address) {
+ text->AllocateVirtualMemory(min_address, max_address - min_address);
+ }
+
+ // Add the symbols.
+ *num_symbols = symbols.size();
+ for (; !symbols.empty(); symbols.pop_front()) {
+ symtab->Add(symbols.front(), text);
+ }
+ symtab->WriteCachedSection();
+
+ // Add the CFI/unwind section.
+ debug_frame->Start();
+ debug_frame->WriteFully(debug_frame_buffer.data(), debug_frame_buffer.size());
+ debug_frame->End();
+
+ builder->End();
+ CHECK(builder->Good());
+ }
+
+ // Produce the outer ELF file.
+ // It contains only the inner ELF file compressed as .gnu_debugdata section.
+ // This extra wrapping is not necessary but the compression saves space.
+ std::vector<uint8_t> outer_elf_file;
+ {
+ std::vector<uint8_t> gnu_debugdata;
+ gnu_debugdata.reserve(inner_elf_file.size() / 4);
+ XzCompress(ArrayRef<const uint8_t>(inner_elf_file), &gnu_debugdata);
+
+ outer_elf_file.reserve(KB + gnu_debugdata.size());
+ linker::VectorOutputStream out("Mini-debug-info ELF file for JIT", &outer_elf_file);
+ std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
+ new linker::ElfBuilder<ElfTypes>(isa, features, &out));
+ builder->Start(/*write_program_headers=*/ false);
+ if (max_address > min_address) {
+ builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
+ }
+ builder->WriteSection(".gnu_debugdata", &gnu_debugdata);
+ builder->End();
+ CHECK(builder->Good());
}
+
+ return outer_elf_file;
}
-template <typename ElfTypes>
-static std::vector<uint8_t> WriteDebugElfFileForClassesInternal(
+std::vector<uint8_t> WriteDebugElfFileForClasses(
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<mirror::Class*>& types)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ using ElfTypes = ElfRuntimeTypes;
+ CHECK_EQ(sizeof(ElfTypes::Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
std::vector<uint8_t> buffer;
buffer.reserve(KB);
linker::VectorOutputStream out("Debug ELF file", &buffer);
std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
new linker::ElfBuilder<ElfTypes>(isa, features, &out));
// No program headers since the ELF file is not linked and has no allocated sections.
- builder->Start(false /* write_program_headers */);
+ builder->Start(/* write_program_headers= */ false);
ElfDebugInfoWriter<ElfTypes> info_writer(builder.get());
info_writer.Start();
ElfCompilationUnitWriter<ElfTypes> cu_writer(&info_writer);
cu_writer.Write(types);
- info_writer.End(false /* write_oat_patches */);
+ info_writer.End(/* write_oat_patches= */ false);
builder->End();
CHECK(builder->Good());
return buffer;
}
-std::vector<uint8_t> WriteDebugElfFileForClasses(InstructionSet isa,
- const InstructionSetFeatures* features,
- const ArrayRef<mirror::Class*>& types) {
- if (Is64BitInstructionSet(isa)) {
- return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, features, types);
- } else {
- return WriteDebugElfFileForClassesInternal<ElfTypes32>(isa, features, types);
- }
-}
-
// Explicit instantiations
template void WriteDebugInfo<ElfTypes32>(
linker::ElfBuilder<ElfTypes32>* builder,
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index e442e0016c..85ab356b0c 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -54,7 +54,14 @@ std::vector<uint8_t> MakeElfFileForJIT(
InstructionSet isa,
const InstructionSetFeatures* features,
bool mini_debug_info,
- ArrayRef<const MethodDebugInfo> method_infos);
+ const MethodDebugInfo& method_info);
+
+std::vector<uint8_t> PackElfFileForJIT(
+ InstructionSet isa,
+ const InstructionSetFeatures* features,
+ std::vector<const uint8_t*>& added_elf_files,
+ std::vector<const void*>& removed_symbols,
+ /*out*/ size_t* num_symbols);
std::vector<uint8_t> WriteDebugElfFileForClasses(
InstructionSet isa,
diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h
index 729c403f00..152db6eaf0 100644
--- a/compiler/debug/method_debug_info.h
+++ b/compiler/debug/method_debug_info.h
@@ -32,7 +32,7 @@ struct MethodDebugInfo {
size_t class_def_index;
uint32_t dex_method_index;
uint32_t access_flags;
- const DexFile::CodeItem* code_item;
+ const dex::CodeItem* code_item;
InstructionSet isa;
bool deduped;
bool is_native_debuggable;
diff --git a/compiler/debug/xz_utils.cc b/compiler/debug/xz_utils.cc
index a9e30a6970..a8f60ac280 100644
--- a/compiler/debug/xz_utils.cc
+++ b/compiler/debug/xz_utils.cc
@@ -17,13 +17,16 @@
#include "xz_utils.h"
#include <vector>
+#include <mutex>
#include "base/array_ref.h"
-#include "dwarf/writer.h"
+#include "base/bit_utils.h"
#include "base/leb128.h"
+#include "dwarf/writer.h"
// liblzma.
#include "7zCrc.h"
+#include "Xz.h"
#include "XzCrc64.h"
#include "XzEnc.h"
@@ -32,10 +35,17 @@ namespace debug {
constexpr size_t kChunkSize = kPageSize;
-static void XzCompressChunk(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst) {
+static void XzInitCrc() {
+ static std::once_flag crc_initialized;
+ std::call_once(crc_initialized, []() {
+ CrcGenerateTable();
+ Crc64GenerateTable();
+ });
+}
+
+static void XzCompressChunk(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
// Configure the compression library.
- CrcGenerateTable();
- Crc64GenerateTable();
+ XzInitCrc();
CLzma2EncProps lzma2Props;
Lzma2EncProps_Init(&lzma2Props);
lzma2Props.lzmaProps.level = 1; // Fast compression.
@@ -62,7 +72,7 @@ static void XzCompressChunk(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst) {
return SZ_OK;
}
size_t src_pos_;
- ArrayRef<uint8_t> src_;
+ ArrayRef<const uint8_t> src_;
std::vector<uint8_t>* dst_;
};
XzCallbacks callbacks;
@@ -85,7 +95,7 @@ static void XzCompressChunk(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst) {
// In short, the file format is: [header] [compressed_block]* [index] [footer]
// Where [index] is: [num_records] ([compressed_size] [uncompressed_size])* [crc32]
//
-void XzCompress(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst) {
+void XzCompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
uint8_t header[] = { 0xFD, '7', 'z', 'X', 'Z', 0, 0, 1, 0x69, 0x22, 0xDE, 0x36 };
uint8_t footer[] = { 0, 1, 'Y', 'Z' };
dst->insert(dst->end(), header, header + sizeof(header));
@@ -138,6 +148,47 @@ void XzCompress(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst) {
writer.UpdateUint32(0, CrcCalc(tmp.data() + 4, 6));
dst->insert(dst->end(), tmp.begin(), tmp.end());
}
+
+ // Decompress the data back and check that we get the original.
+ if (kIsDebugBuild) {
+ std::vector<uint8_t> decompressed;
+ XzDecompress(ArrayRef<const uint8_t>(*dst), &decompressed);
+ DCHECK_EQ(decompressed.size(), src.size());
+ DCHECK_EQ(memcmp(decompressed.data(), src.data(), src.size()), 0);
+ }
+}
+
+void XzDecompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst) {
+ XzInitCrc();
+ std::unique_ptr<CXzUnpacker> state(new CXzUnpacker());
+ ISzAlloc alloc;
+ alloc.Alloc = [](ISzAllocPtr, size_t size) { return malloc(size); };
+ alloc.Free = [](ISzAllocPtr, void* ptr) { return free(ptr); };
+ XzUnpacker_Construct(state.get(), &alloc);
+
+ size_t src_offset = 0;
+ size_t dst_offset = 0;
+ ECoderStatus status;
+ do {
+ dst->resize(RoundUp(dst_offset + kPageSize / 4, kPageSize));
+ size_t src_remaining = src.size() - src_offset;
+ size_t dst_remaining = dst->size() - dst_offset;
+ int return_val = XzUnpacker_Code(state.get(),
+ dst->data() + dst_offset,
+ &dst_remaining,
+ src.data() + src_offset,
+ &src_remaining,
+ true,
+ CODER_FINISH_ANY,
+ &status);
+ CHECK_EQ(return_val, SZ_OK);
+ src_offset += src_remaining;
+ dst_offset += dst_remaining;
+ } while (status == CODER_STATUS_NOT_FINISHED);
+ CHECK_EQ(src_offset, src.size());
+ CHECK(XzUnpacker_IsStreamWasFinished(state.get()));
+ XzUnpacker_Free(state.get());
+ dst->resize(dst_offset);
}
} // namespace debug
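
Note: the new XzInitCrc() helper above makes the liblzma CRC table setup idempotent and thread-safe via std::call_once. Below is a minimal standalone sketch of the same pattern; the table-generation functions are hypothetical placeholders, not the liblzma ones.

    #include <mutex>

    namespace {

    void GenerateTableA() { /* fill a lookup table (placeholder) */ }
    void GenerateTableB() { /* fill another lookup table (placeholder) */ }

    // Runs the initialization lambda exactly once, even if several threads
    // call InitTablesOnce() concurrently; later calls are cheap no-ops.
    void InitTablesOnce() {
      static std::once_flag initialized;
      std::call_once(initialized, []() {
        GenerateTableA();
        GenerateTableB();
      });
    }

    }  // namespace
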
diff --git a/compiler/debug/xz_utils.h b/compiler/debug/xz_utils.h
index c4076c6581..731b03c7e1 100644
--- a/compiler/debug/xz_utils.h
+++ b/compiler/debug/xz_utils.h
@@ -24,7 +24,8 @@
namespace art {
namespace debug {
-void XzCompress(ArrayRef<uint8_t> src, std::vector<uint8_t>* dst);
+void XzCompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst);
+void XzDecompress(ArrayRef<const uint8_t> src, std::vector<uint8_t>* dst);
} // namespace debug
} // namespace art
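
A sketch of how a caller might use the XzCompress()/XzDecompress() pair declared above, assuming it is built inside the ART tree so that base/array_ref.h and this header are available; it mirrors the debug-build round-trip check that XzCompress() now performs internally.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    #include "base/array_ref.h"
    #include "debug/xz_utils.h"

    // Compress a buffer, then decompress it again and check the round trip.
    std::vector<uint8_t> CompressAndCheck(const std::vector<uint8_t>& input) {
      std::vector<uint8_t> compressed;
      art::debug::XzCompress(
          art::ArrayRef<const uint8_t>(input.data(), input.size()), &compressed);

      std::vector<uint8_t> decompressed;
      art::debug::XzDecompress(
          art::ArrayRef<const uint8_t>(compressed.data(), compressed.size()),
          &decompressed);
      assert(decompressed == input);  // Round trip must reproduce the original bytes.
      return compressed;
    }
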
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ad9a30f8d4..23ce37ef1a 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -376,9 +376,7 @@ void DexToDexCompiler::CompilationState::CompileReturnVoid(Instruction* inst, ui
DCHECK_EQ(inst->Opcode(), Instruction::RETURN_VOID);
if (unit_.IsConstructor()) {
// Are we compiling a non-clinit constructor which needs a barrier?
- if (!unit_.IsStatic() &&
- driver_.RequiresConstructorBarrier(Thread::Current(), unit_.GetDexFile(),
- unit_.GetClassDefIndex())) {
+ if (!unit_.IsStatic() && unit_.RequiresConstructorBarrier()) {
return;
}
}
@@ -475,7 +473,7 @@ void DexToDexCompiler::CompilationState::CompileInvokeVirtual(Instruction* inst,
method_idx,
unit_.GetDexCache(),
unit_.GetClassLoader(),
- /* referrer */ nullptr,
+ /* referrer= */ nullptr,
kVirtual);
if (UNLIKELY(resolved_method == nullptr)) {
@@ -507,7 +505,7 @@ void DexToDexCompiler::CompilationState::CompileInvokeVirtual(Instruction* inst,
}
CompiledMethod* DexToDexCompiler::CompileMethod(
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type ATTRIBUTE_UNUSED,
uint16_t class_def_idx,
@@ -530,7 +528,7 @@ CompiledMethod* DexToDexCompiler::CompileMethod(
class_def_idx,
method_idx,
access_flags,
- driver_->GetVerifiedMethod(&dex_file, method_idx),
+ driver_->GetCompilerOptions().GetVerifiedMethod(&dex_file, method_idx),
hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)));
std::vector<uint8_t> quicken_data;
@@ -616,7 +614,7 @@ CompiledMethod* DexToDexCompiler::CompileMethod(
instruction_set = InstructionSet::kArm;
}
CompiledMethod* ret = CompiledMethod::SwapAllocCompiledMethod(
- driver_,
+ driver_->GetCompiledMethodStorage(),
instruction_set,
ArrayRef<const uint8_t>(), // no code
ArrayRef<const uint8_t>(quicken_data), // vmap_table
@@ -629,11 +627,11 @@ CompiledMethod* DexToDexCompiler::CompileMethod(
void DexToDexCompiler::SetDexFiles(const std::vector<const DexFile*>& dex_files) {
// Record what code items are already seen to detect when multiple methods have the same code
// item.
- std::unordered_set<const DexFile::CodeItem*> seen_code_items;
+ std::unordered_set<const dex::CodeItem*> seen_code_items;
for (const DexFile* dex_file : dex_files) {
for (ClassAccessor accessor : dex_file->GetClasses()) {
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
- const DexFile::CodeItem* code_item = method.GetCodeItem();
+ const dex::CodeItem* code_item = method.GetCodeItem();
// Detect the shared code items.
if (!seen_code_items.insert(code_item).second) {
shared_code_items_.insert(code_item);
@@ -648,7 +646,7 @@ void DexToDexCompiler::UnquickenConflictingMethods() {
MutexLock mu(Thread::Current(), lock_);
size_t unquicken_count = 0;
for (const auto& pair : shared_code_item_quicken_info_) {
- const DexFile::CodeItem* code_item = pair.first;
+ const dex::CodeItem* code_item = pair.first;
const QuickenState& state = pair.second;
CHECK_GE(state.methods_.size(), 1u);
if (state.conflict_) {
@@ -667,7 +665,8 @@ void DexToDexCompiler::UnquickenConflictingMethods() {
// There is up to one compiled method for each method ref. Releasing it leaves the
// deduped data intact, which means it's safe to do even when other threads might be
// compiling.
- CompiledMethod::ReleaseSwapAllocatedCompiledMethod(driver_, method);
+ CompiledMethod::ReleaseSwapAllocatedCompiledMethod(driver_->GetCompiledMethodStorage(),
+ method);
}
}
}
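
The SetDexFiles() change above keeps the existing duplicate-detection idiom: insert().second is false exactly when the element was already present. A small generic sketch of that idiom, with hypothetical names:

    #include <unordered_set>
    #include <vector>

    // Collect the pointers that occur more than once in `items`.
    template <typename T>
    std::unordered_set<const T*> FindShared(const std::vector<const T*>& items) {
      std::unordered_set<const T*> seen;
      std::unordered_set<const T*> shared;
      for (const T* item : items) {
        if (!seen.insert(item).second) {  // false => already seen => shared.
          shared.insert(item);
        }
      }
      return shared;
    }
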
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 7536c3126a..78309ae7ac 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -22,7 +22,7 @@
#include <unordered_set>
#include "base/bit_vector.h"
-#include "dex/dex_file.h"
+#include "base/mutex.h"
#include "dex/invoke_type.h"
#include "dex/method_reference.h"
#include "handle.h"
@@ -33,6 +33,11 @@ namespace art {
class CompiledMethod;
class CompilerDriver;
class DexCompilationUnit;
+class DexFile;
+
+namespace dex {
+struct CodeItem;
+} // namespace dex
namespace mirror {
class ClassLoader;
@@ -49,7 +54,7 @@ class DexToDexCompiler {
explicit DexToDexCompiler(CompilerDriver* driver);
- CompiledMethod* CompileMethod(const DexFile::CodeItem* code_item,
+ CompiledMethod* CompileMethod(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -104,9 +109,9 @@ class DexToDexCompiler {
std::unordered_map<const DexFile*, BitVector> should_quicken_;
// Guarded by lock_ during writing, accessed without a lock during quickening.
// This is safe because no thread is adding to the shared code items during the quickening phase.
- std::unordered_set<const DexFile::CodeItem*> shared_code_items_;
+ std::unordered_set<const dex::CodeItem*> shared_code_items_;
// Blacklisted code items are unquickened in UnquickenConflictingMethods.
- std::unordered_map<const DexFile::CodeItem*, QuickenState> shared_code_item_quicken_info_
+ std::unordered_map<const dex::CodeItem*, QuickenState> shared_code_item_quicken_info_
GUARDED_BY(lock_);
// Number of added code items.
size_t num_code_items_ GUARDED_BY(lock_) = 0u;
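
The header now forward-declares DexFile and dex::CodeItem instead of pulling in dex/dex_file.h, which suffices because only pointers to those types appear in the declarations. A generic sketch of that technique with hypothetical names:

    #include <unordered_set>

    namespace gadgets {
    class Widget;  // Forward declaration instead of #include "gadgets/widget.h".
    }  // namespace gadgets

    // Only Widget pointers are used here, so the full definition is not needed
    // and the heavy header stays out of this file's include graph.
    class Registry {
     public:
      void Add(const gadgets::Widget* widget) { widgets_.insert(widget); }

     private:
      std::unordered_set<const gadgets::Widget*> widgets_;
    };
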
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
index 4f83d605a3..1f04546e2d 100644
--- a/compiler/dex/dex_to_dex_decompiler_test.cc
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -39,16 +39,15 @@ namespace art {
class DexToDexDecompilerTest : public CommonCompilerTest {
public:
void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
- TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
- TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
- compiler_options_->boot_image_ = false;
+ TimingLogger timings("DexToDexDecompilerTest::CompileAll", false, false);
+ compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
compiler_options_->SetCompilerFilter(CompilerFilter::kQuicken);
// Create the main VerifierDeps, here instead of in the compiler since we want to aggregate
// the results for all the dex files, not just the results for the current dex file.
down_cast<QuickCompilerCallbacks*>(Runtime::Current()->GetCompilerCallbacks())->SetVerifierDeps(
new verifier::VerifierDeps(GetDexFiles(class_loader)));
- SetDexFilesForOatFile(GetDexFiles(class_loader));
- compiler_driver_->CompileAll(class_loader, GetDexFiles(class_loader), &timings);
+ std::vector<const DexFile*> dex_files = GetDexFiles(class_loader);
+ CommonCompilerTest::CompileAll(class_loader, dex_files, &timings);
}
void RunTest(const char* dex_name) {
@@ -96,7 +95,7 @@ class DexToDexDecompilerTest : public CommonCompilerTest {
optimizer::ArtDecompileDEX(*updated_dex_file,
*accessor.GetCodeItem(method),
table,
- /* decompile_return_instruction */ true);
+ /* decompile_return_instruction= */ true);
}
}
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index fe8b766d0f..b0f025d092 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -41,7 +41,7 @@ namespace { // anonymous namespace
class Matcher {
public:
// Match function type.
- typedef bool MatchFn(Matcher* matcher);
+ using MatchFn = bool(Matcher*);
template <size_t size>
static bool Match(const CodeItemDataAccessor* code_item, MatchFn* const (&pattern)[size]);
@@ -216,7 +216,7 @@ bool RecordConstructorIPut(ArtMethod* method,
DCHECK(IsInstructionIPut(new_iput->Opcode()));
uint32_t field_index = new_iput->VRegC_22c();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static */ false);
+ ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static= */ false);
if (UNLIKELY(field == nullptr)) {
return false;
}
@@ -228,7 +228,7 @@ bool RecordConstructorIPut(ArtMethod* method,
}
ArtField* f = class_linker->LookupResolvedField(iputs[old_pos].field_index,
method,
- /* is_static */ false);
+ /* is_static= */ false);
DCHECK(f != nullptr);
if (f == field) {
auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos);
@@ -511,7 +511,7 @@ bool InlineMethodAnalyser::AnalyseMethodCode(const CodeItemDataAccessor* code_it
}
bool InlineMethodAnalyser::IsSyntheticAccessor(MethodReference ref) {
- const DexFile::MethodId& method_id = ref.dex_file->GetMethodId(ref.index);
+ const dex::MethodId& method_id = ref.dex_file->GetMethodId(ref.index);
const char* method_name = ref.dex_file->GetMethodName(method_id);
// javac names synthetic accessors "access$nnn",
// jack names them "-getN", "-putN", "-wrapN".
@@ -713,7 +713,7 @@ bool InlineMethodAnalyser::ComputeSpecialAccessorInfo(ArtMethod* method,
}
ObjPtr<mirror::DexCache> dex_cache = method->GetDexCache();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static */ false);
+ ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static= */ false);
if (field == nullptr || field->IsStatic()) {
return false;
}
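
The Matcher change above swaps a typedef of a function type for the equivalent `using` alias. Both spellings name the same type; a tiny sketch:

    class Matcher;

    typedef bool MatchFnOld(Matcher* matcher);  // Old spelling.
    using MatchFnNew = bool(Matcher*);          // New spelling, reads left to right.

    // Either alias names a function type, so pattern tables hold pointers to it.
    bool AlwaysTrue(Matcher*) { return true; }
    MatchFnNew* const kPattern[] = { &AlwaysTrue };
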
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index b7117bd223..e92b67a0e8 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,11 +38,6 @@ class QuickCompilerCallbacks final : public CompilerCallbacks {
void ClassRejected(ClassReference ref) override;
- // We are running in an environment where we can call patchoat safely so we should.
- bool IsRelocationPossible() override {
- return true;
- }
-
verifier::VerifierDeps* GetVerifierDeps() const override {
return verifier_deps_.get();
}
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 1e0b94de81..6bd5fe8bd8 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -79,7 +79,7 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
if (inserted) {
// Successfully added, release the unique_ptr since we no longer have ownership.
DCHECK_EQ(GetVerifiedMethod(ref), verified_method.get());
- verified_method.release();
+ verified_method.release(); // NOLINT b/117926937
} else {
// TODO: Investigate why are we doing the work again for this method and try to avoid it.
LOG(WARNING) << "Method processed more than once: " << ref.PrettyMethod();
@@ -97,7 +97,7 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method
}
}
-const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) {
+const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) const {
const VerifiedMethod* ret = nullptr;
if (atomic_verified_methods_.Get(ref, &ret)) {
return ret;
@@ -112,12 +112,12 @@ void VerificationResults::CreateVerifiedMethodFor(MethodReference ref) {
// which have no verifier error, nor any methods that we know will throw
// at runtime.
std::unique_ptr<VerifiedMethod> verified_method = std::make_unique<VerifiedMethod>(
- /* encountered_error_types */ 0, /* has_runtime_throw */ false);
+ /* encountered_error_types= */ 0, /* has_runtime_throw= */ false);
if (atomic_verified_methods_.Insert(ref,
/*expected*/ nullptr,
verified_method.get()) ==
AtomicMap::InsertResult::kInsertResultSuccess) {
- verified_method.release();
+ verified_method.release(); // NOLINT b/117926937
}
}
@@ -129,13 +129,13 @@ void VerificationResults::AddRejectedClass(ClassReference ref) {
DCHECK(IsClassRejected(ref));
}
-bool VerificationResults::IsClassRejected(ClassReference ref) {
+bool VerificationResults::IsClassRejected(ClassReference ref) const {
ReaderMutexLock mu(Thread::Current(), rejected_classes_lock_);
return (rejected_classes_.find(ref) != rejected_classes_.end());
}
bool VerificationResults::IsCandidateForCompilation(MethodReference&,
- const uint32_t access_flags) {
+ const uint32_t access_flags) const {
if (!compiler_options_->IsAotCompilationEnabled()) {
return false;
}
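
ProcessVerifiedMethod() and CreateVerifiedMethodFor() above keep the existing ownership hand-off: the container stores raw pointers, so the unique_ptr is released only after a successful insertion (hence the NOLINT annotations). A simplified sketch of that pattern with hypothetical types:

    #include <memory>
    #include <unordered_map>

    struct Result { int value; };

    // Returns true if `result` was inserted; the map then holds the raw pointer
    // (to be deleted when the map is torn down). On failure the unique_ptr
    // keeps ownership and frees the object on return.
    bool InsertIfAbsent(std::unordered_map<int, const Result*>* map,
                        int key,
                        std::unique_ptr<Result> result) {
      auto inserted = map->emplace(key, result.get());
      if (inserted.second) {
        result.release();
        return true;
      }
      return false;
    }
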
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 56f00309c0..04c4fa65e6 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -51,13 +51,13 @@ class VerificationResults {
void CreateVerifiedMethodFor(MethodReference ref)
REQUIRES(!verified_methods_lock_);
- const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
+ const VerifiedMethod* GetVerifiedMethod(MethodReference ref) const
REQUIRES(!verified_methods_lock_);
void AddRejectedClass(ClassReference ref) REQUIRES(!rejected_classes_lock_);
- bool IsClassRejected(ClassReference ref) REQUIRES(!rejected_classes_lock_);
+ bool IsClassRejected(ClassReference ref) const REQUIRES(!rejected_classes_lock_);
- bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags);
+ bool IsCandidateForCompilation(MethodReference& method_ref, const uint32_t access_flags) const;
// Add a dex file to enable using the atomic map.
void AddDexFile(const DexFile* dex_file) REQUIRES(!verified_methods_lock_);
@@ -74,10 +74,12 @@ class VerificationResults {
// GetVerifiedMethod.
AtomicMap atomic_verified_methods_;
- ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // TODO: External locking during CompilerDriver::PreCompile(), no locking during compilation.
+ mutable ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// Rejected classes.
- ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // TODO: External locking during CompilerDriver::PreCompile(), no locking during compilation.
+ mutable ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::set<ClassReference> rejected_classes_ GUARDED_BY(rejected_classes_lock_);
friend class verifier::VerifierDepsTest;
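
Marking the two ReaderWriterMutex members `mutable` is what lets the newly const accessors above take locks. A standard-library analogue of the same idea, with std::shared_mutex standing in for ART's ReaderWriterMutex and a hypothetical class:

    #include <set>
    #include <shared_mutex>

    class RejectedSet {
     public:
      void Add(int ref) {
        std::unique_lock<std::shared_mutex> lock(lock_);
        rejected_.insert(ref);
      }
      bool IsRejected(int ref) const {
        std::shared_lock<std::shared_mutex> lock(lock_);  // OK: lock_ is mutable.
        return rejected_.count(ref) != 0;
      }

     private:
      mutable std::shared_mutex lock_;  // Lockable from const member functions.
      std::set<int> rejected_;
    };
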
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index f2da3ffc2f..54f216a64d 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -82,7 +82,7 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi
method_verifier->ResolveCheckedClass(dex::TypeIndex(inst.VRegB_21c()));
// Pass null for the method verifier to not record the VerifierDeps dependency
// if the types are not assignable.
- if (cast_type.IsStrictlyAssignableFrom(reg_type, /* method_verifier */ nullptr)) {
+ if (cast_type.IsStrictlyAssignableFrom(reg_type, /* verifier= */ nullptr)) {
// The types are assignable, we record that dependency in the VerifierDeps so
// that if this changes after OTA, we will re-verify again.
// We check if reg_type has a class, as the verifier may have inferred it's
@@ -92,8 +92,8 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi
verifier::VerifierDeps::MaybeRecordAssignability(method_verifier->GetDexFile(),
cast_type.GetClass(),
reg_type.GetClass(),
- /* strict */ true,
- /* assignable */ true);
+ /* is_strict= */ true,
+ /* is_assignable= */ true);
}
if (safe_cast_set_ == nullptr) {
safe_cast_set_.reset(new SafeCastSet());
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 5e2f444a24..05eacd848d 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -19,25 +19,13 @@
#include <gtest/gtest.h>
#include "compiled_method-inl.h"
-#include "compiler_driver.h"
-#include "compiler_options.h"
-#include "dex/verification_results.h"
namespace art {
TEST(CompiledMethodStorage, Deduplicate) {
- CompilerOptions compiler_options;
- VerificationResults verification_results(&compiler_options);
- CompilerDriver driver(&compiler_options,
- &verification_results,
- Compiler::kOptimizing,
- /* image_classes */ nullptr,
- /* thread_count */ 1u,
- /* swap_fd */ -1,
- /* profile_compilation_info */ nullptr);
- CompiledMethodStorage* storage = driver.GetCompiledMethodStorage();
+ CompiledMethodStorage storage(/* swap_fd= */ -1);
- ASSERT_TRUE(storage->DedupeEnabled()); // The default.
+ ASSERT_TRUE(storage.DedupeEnabled()); // The default.
const uint8_t raw_code1[] = { 1u, 2u, 3u };
const uint8_t raw_code2[] = { 4u, 3u, 2u, 1u };
@@ -77,7 +65,7 @@ TEST(CompiledMethodStorage, Deduplicate) {
for (auto&& f : cfi_info) {
for (auto&& p : patches) {
compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
- &driver, InstructionSet::kNone, c, v, f, p));
+ &storage, InstructionSet::kNone, c, v, f, p));
}
}
}
@@ -106,7 +94,7 @@ TEST(CompiledMethodStorage, Deduplicate) {
}
}
for (CompiledMethod* method : compiled_methods) {
- CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&driver, method);
+ CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&storage, method);
}
}
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index 294072d7e7..ec2e38bc7f 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -57,7 +57,7 @@ inline ObjPtr<mirror::Class> CompilerDriver::ResolveCompilingMethodsClass(
const DexCompilationUnit* mUnit) {
DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile());
DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
- const DexFile::MethodId& referrer_method_id =
+ const dex::MethodId& referrer_method_id =
mUnit->GetDexFile()->GetMethodId(mUnit->GetDexMethodIndex());
return ResolveClass(soa, dex_cache, class_loader, referrer_method_id.class_idx_, mUnit);
}
@@ -99,30 +99,6 @@ inline std::pair<bool, bool> CompilerDriver::IsFastInstanceField(
return std::make_pair(fast_get, fast_put);
}
-inline ArtMethod* CompilerDriver::ResolveMethod(
- ScopedObjectAccess& soa,
- Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader,
- const DexCompilationUnit* mUnit,
- uint32_t method_idx,
- InvokeType invoke_type) {
- DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get());
- ArtMethod* resolved_method =
- mUnit->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
- method_idx, dex_cache, class_loader, /* referrer */ nullptr, invoke_type);
- if (UNLIKELY(resolved_method == nullptr)) {
- DCHECK(soa.Self()->IsExceptionPending());
- // Clean up any exception left by type resolution.
- soa.Self()->ClearException();
- }
- return resolved_method;
-}
-
-inline VerificationResults* CompilerDriver::GetVerificationResults() const {
- DCHECK(Runtime::Current()->IsAotCompiler());
- return verification_results_;
-}
-
} // namespace art
#endif // ART_COMPILER_DRIVER_COMPILER_DRIVER_INL_H_
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index f6afe2c958..d46cffb1e8 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -24,6 +24,7 @@
#include <malloc.h> // For mallinfo
#endif
+#include "android-base/logging.h"
#include "android-base/strings.h"
#include "art_field-inl.h"
@@ -111,19 +112,7 @@ static void DumpStat(size_t x, size_t y, const char* str) {
class CompilerDriver::AOTCompilationStats {
public:
AOTCompilationStats()
- : stats_lock_("AOT compilation statistics lock"),
- resolved_instance_fields_(0), unresolved_instance_fields_(0),
- resolved_local_static_fields_(0), resolved_static_fields_(0), unresolved_static_fields_(0),
- type_based_devirtualization_(0),
- safe_casts_(0), not_safe_casts_(0) {
- for (size_t i = 0; i <= kMaxInvokeType; i++) {
- resolved_methods_[i] = 0;
- unresolved_methods_[i] = 0;
- virtual_made_direct_[i] = 0;
- direct_calls_to_boot_[i] = 0;
- direct_methods_to_boot_[i] = 0;
- }
- }
+ : stats_lock_("AOT compilation statistics lock") {}
void Dump() {
DumpStat(resolved_instance_fields_, unresolved_instance_fields_, "instance fields resolved");
@@ -140,6 +129,16 @@ class CompilerDriver::AOTCompilationStats {
type_based_devirtualization_,
"virtual/interface calls made direct based on type information");
+ const size_t total = std::accumulate(
+ class_status_count_,
+ class_status_count_ + static_cast<size_t>(ClassStatus::kLast) + 1,
+ 0u);
+ for (size_t i = 0; i <= static_cast<size_t>(ClassStatus::kLast); ++i) {
+ std::ostringstream oss;
+ oss << "classes with status " << static_cast<ClassStatus>(i);
+ DumpStat(class_status_count_[i], total - class_status_count_[i], oss.str().c_str());
+ }
+
for (size_t i = 0; i <= kMaxInvokeType; i++) {
std::ostringstream oss;
oss << static_cast<InvokeType>(i) << " methods were AOT resolved";
@@ -218,61 +217,57 @@ class CompilerDriver::AOTCompilationStats {
not_safe_casts_++;
}
+ // Register a class status.
+ void AddClassStatus(ClassStatus status) REQUIRES(!stats_lock_) {
+ STATS_LOCK();
+ ++class_status_count_[static_cast<size_t>(status)];
+ }
+
private:
Mutex stats_lock_;
- size_t resolved_instance_fields_;
- size_t unresolved_instance_fields_;
+ size_t resolved_instance_fields_ = 0u;
+ size_t unresolved_instance_fields_ = 0u;
- size_t resolved_local_static_fields_;
- size_t resolved_static_fields_;
- size_t unresolved_static_fields_;
+ size_t resolved_local_static_fields_ = 0u;
+ size_t resolved_static_fields_ = 0u;
+ size_t unresolved_static_fields_ = 0u;
// Type based devirtualization for invoke interface and virtual.
- size_t type_based_devirtualization_;
+ size_t type_based_devirtualization_ = 0u;
- size_t resolved_methods_[kMaxInvokeType + 1];
- size_t unresolved_methods_[kMaxInvokeType + 1];
- size_t virtual_made_direct_[kMaxInvokeType + 1];
- size_t direct_calls_to_boot_[kMaxInvokeType + 1];
- size_t direct_methods_to_boot_[kMaxInvokeType + 1];
+ size_t resolved_methods_[kMaxInvokeType + 1] = {};
+ size_t unresolved_methods_[kMaxInvokeType + 1] = {};
+ size_t virtual_made_direct_[kMaxInvokeType + 1] = {};
+ size_t direct_calls_to_boot_[kMaxInvokeType + 1] = {};
+ size_t direct_methods_to_boot_[kMaxInvokeType + 1] = {};
- size_t safe_casts_;
- size_t not_safe_casts_;
+ size_t safe_casts_ = 0u;
+ size_t not_safe_casts_ = 0u;
+
+ size_t class_status_count_[static_cast<size_t>(ClassStatus::kLast) + 1] = {};
DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
};
CompilerDriver::CompilerDriver(
const CompilerOptions* compiler_options,
- VerificationResults* verification_results,
Compiler::Kind compiler_kind,
- HashSet<std::string>* image_classes,
size_t thread_count,
- int swap_fd,
- const ProfileCompilationInfo* profile_compilation_info)
+ int swap_fd)
: compiler_options_(compiler_options),
- verification_results_(verification_results),
compiler_(Compiler::Create(this, compiler_kind)),
compiler_kind_(compiler_kind),
- requires_constructor_barrier_lock_("constructor barrier lock"),
- image_classes_(std::move(image_classes)),
number_of_soft_verifier_failures_(0),
had_hard_verifier_failure_(false),
parallel_thread_count_(thread_count),
stats_(new AOTCompilationStats),
- compiler_context_(nullptr),
compiled_method_storage_(swap_fd),
- profile_compilation_info_(profile_compilation_info),
max_arena_alloc_(0),
dex_to_dex_compiler_(this) {
DCHECK(compiler_options_ != nullptr);
compiler_->Init();
- if (GetCompilerOptions().IsBootImage()) {
- CHECK(image_classes_ != nullptr) << "Expected image classes for boot image";
- }
-
compiled_method_storage_.SetDedupeEnabled(compiler_options_->DeduplicateCode());
}
@@ -280,7 +275,7 @@ CompilerDriver::~CompilerDriver() {
compiled_methods_.Visit([this](const DexFileReference& ref ATTRIBUTE_UNUSED,
CompiledMethod* method) {
if (method != nullptr) {
- CompiledMethod::ReleaseSwapAllocatedCompiledMethod(this, method);
+ CompiledMethod::ReleaseSwapAllocatedCompiledMethod(GetCompiledMethodStorage(), method);
}
});
compiler_->UnInit();
@@ -328,9 +323,8 @@ void CompilerDriver::CompileAll(jobject class_loader,
TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
- InitializeThreadPools();
+ CheckThreadPools();
- PreCompile(class_loader, dex_files, timings);
if (GetCompilerOptions().IsBootImage()) {
// We don't need to setup the intrinsics for non boot image compilation, as
// those compilations will pick up a boot image that have the ArtMethod already
@@ -346,13 +340,11 @@ void CompilerDriver::CompileAll(jobject class_loader,
if (GetCompilerOptions().GetDumpStats()) {
stats_->Dump();
}
-
- FreeThreadPools();
}
static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel(
Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
- const DexFile& dex_file, const DexFile::ClassDef& class_def)
+ const DexFile& dex_file, const dex::ClassDef& class_def)
REQUIRES_SHARED(Locks::mutator_lock_) {
// When the dex file is uncompressed in the APK, we do not generate a copy in the .vdex
// file. As a result, dex2oat will map the dex file read-only, and we only need to check
@@ -397,7 +389,7 @@ static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel
const CompilerDriver& driver,
jobject jclass_loader,
const DexFile& dex_file,
- const DexFile::ClassDef& class_def) {
+ const dex::ClassDef& class_def) {
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
@@ -424,7 +416,7 @@ template <typename CompileFn>
static void CompileMethodHarness(
Thread* self,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -432,7 +424,6 @@ static void CompileMethodHarness(
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
- bool compilation_enabled,
Handle<mirror::DexCache> dex_cache,
CompileFn compile_fn) {
DCHECK(driver != nullptr);
@@ -450,7 +441,6 @@ static void CompileMethodHarness(
class_loader,
dex_file,
dex_to_dex_compilation_level,
- compilation_enabled,
dex_cache);
if (kTimeCompileMethod) {
@@ -475,7 +465,7 @@ static void CompileMethodHarness(
static void CompileMethodDex2Dex(
Thread* self,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -483,11 +473,10 @@ static void CompileMethodDex2Dex(
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
- bool compilation_enabled,
Handle<mirror::DexCache> dex_cache) {
auto dex_2_dex_fn = [](Thread* self ATTRIBUTE_UNUSED,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -495,7 +484,6 @@ static void CompileMethodDex2Dex(
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
- bool compilation_enabled ATTRIBUTE_UNUSED,
Handle<mirror::DexCache> dex_cache ATTRIBUTE_UNUSED) -> CompiledMethod* {
DCHECK(driver != nullptr);
MethodReference method_ref(&dex_file, method_idx);
@@ -503,7 +491,7 @@ static void CompileMethodDex2Dex(
optimizer::DexToDexCompiler* const compiler = &driver->GetDexToDexCompiler();
if (compiler->ShouldCompileMethod(method_ref)) {
- VerificationResults* results = driver->GetVerificationResults();
+ const VerificationResults* results = driver->GetCompilerOptions().GetVerificationResults();
DCHECK(results != nullptr);
const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
// Do not optimize if a VerifiedMethod is missing. SafeCast elision,
@@ -532,7 +520,6 @@ static void CompileMethodDex2Dex(
class_loader,
dex_file,
dex_to_dex_compilation_level,
- compilation_enabled,
dex_cache,
dex_2_dex_fn);
}
@@ -540,7 +527,7 @@ static void CompileMethodDex2Dex(
static void CompileMethodQuick(
Thread* self,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -548,12 +535,11 @@ static void CompileMethodQuick(
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
- bool compilation_enabled,
Handle<mirror::DexCache> dex_cache) {
auto quick_fn = [](
Thread* self,
CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -561,7 +547,6 @@ static void CompileMethodQuick(
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
- bool compilation_enabled,
Handle<mirror::DexCache> dex_cache) {
DCHECK(driver != nullptr);
CompiledMethod* compiled_method = nullptr;
@@ -584,10 +569,10 @@ static void CompileMethodQuick(
} else if ((access_flags & kAccAbstract) != 0) {
// Abstract methods don't have code.
} else {
- VerificationResults* results = driver->GetVerificationResults();
+ const VerificationResults* results = driver->GetCompilerOptions().GetVerificationResults();
DCHECK(results != nullptr);
const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
- bool compile = compilation_enabled &&
+ bool compile =
// Basic checks, e.g., not <clinit>.
results->IsCandidateForCompilation(method_ref, access_flags) &&
// Did not fail to create VerifiedMethod metadata.
@@ -609,6 +594,29 @@ static void CompileMethodQuick(
class_loader,
dex_file,
dex_cache);
+ ProfileMethodsCheck check_type =
+ driver->GetCompilerOptions().CheckProfiledMethodsCompiled();
+ if (UNLIKELY(check_type != ProfileMethodsCheck::kNone)) {
+ bool violation = driver->ShouldCompileBasedOnProfile(method_ref) &&
+ (compiled_method == nullptr);
+ if (violation) {
+ std::ostringstream oss;
+ oss << "Failed to compile "
+ << method_ref.dex_file->PrettyMethod(method_ref.index)
+ << "[" << method_ref.dex_file->GetLocation() << "]"
+ << " as expected by profile";
+ switch (check_type) {
+ case ProfileMethodsCheck::kNone:
+ break;
+ case ProfileMethodsCheck::kLog:
+ LOG(ERROR) << oss.str();
+ break;
+ case ProfileMethodsCheck::kAbort:
+ LOG(FATAL_WITHOUT_ABORT) << oss.str();
+ _exit(1);
+ }
+ }
+ }
}
if (compiled_method == nullptr &&
dex_to_dex_compilation_level !=
@@ -630,7 +638,6 @@ static void CompileMethodQuick(
class_loader,
dex_file,
dex_to_dex_compilation_level,
- compilation_enabled,
dex_cache,
quick_fn);
}
@@ -643,7 +650,7 @@ void CompilerDriver::CompileOne(Thread* self,
uint32_t method_idx,
uint32_t access_flags,
InvokeType invoke_type,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> h_class_loader) {
// Can we run DEX-to-DEX compiler on this class ?
@@ -664,7 +671,6 @@ void CompilerDriver::CompileOne(Thread* self,
h_class_loader,
dex_file,
dex_to_dex_compilation_level,
- true,
dex_cache);
const size_t num_methods = dex_to_dex_compiler_.NumCodeItemsToQuicken(self);
@@ -680,7 +686,6 @@ void CompilerDriver::CompileOne(Thread* self,
h_class_loader,
dex_file,
dex_to_dex_compilation_level,
- true,
dex_cache);
dex_to_dex_compiler_.ClearState();
}
@@ -708,25 +713,44 @@ void CompilerDriver::Resolve(jobject class_loader,
}
}
-static void ResolveConstStrings(CompilerDriver* driver,
- const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
+void CompilerDriver::ResolveConstStrings(const std::vector<const DexFile*>& dex_files,
+ bool only_startup_strings,
+ TimingLogger* timings) {
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
MutableHandle<mirror::DexCache> dex_cache(hs.NewHandle<mirror::DexCache>(nullptr));
+ size_t num_instructions = 0u;
for (const DexFile* dex_file : dex_files) {
dex_cache.Assign(class_linker->FindDexCache(soa.Self(), *dex_file));
+ if (only_startup_strings) {
+ // When resolving startup strings, create the preresolved strings array.
+ dex_cache->AddPreResolvedStringsArray();
+ }
TimingLogger::ScopedTiming t("Resolve const-string Strings", timings);
+ // TODO: Implement a profile-based filter for the boot image. See b/76145463.
for (ClassAccessor accessor : dex_file->GetClasses()) {
- if (!driver->IsClassToCompile(accessor.GetDescriptor())) {
- // Compilation is skipped, do not resolve const-string in code of this class.
- // FIXME: Make sure that inlining honors this. b/26687569
- continue;
- }
+ const ProfileCompilationInfo* profile_compilation_info =
+ GetCompilerOptions().GetProfileCompilationInfo();
+
+ const bool is_startup_class =
+ profile_compilation_info != nullptr &&
+ profile_compilation_info->ContainsClass(*dex_file, accessor.GetClassIdx());
+
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ const bool is_clinit = (method.GetAccessFlags() & kAccConstructor) != 0 &&
+ (method.GetAccessFlags() & kAccStatic) != 0;
+ const bool is_startup_clinit = is_startup_class && is_clinit;
+
+ if (only_startup_strings &&
+ profile_compilation_info != nullptr &&
+ (!profile_compilation_info->GetMethodHotness(method.GetReference()).IsStartup() &&
+ !is_startup_clinit)) {
+ continue;
+ }
+
// Resolve const-strings in the code. Done to have deterministic allocation behavior. Right
// now this is single-threaded for simplicity.
// TODO: Collect the relevant string indices in parallel, then allocate them sequentially
@@ -740,6 +764,11 @@ static void ResolveConstStrings(CompilerDriver* driver,
: inst->VRegB_31c());
ObjPtr<mirror::String> string = class_linker->ResolveString(string_index, dex_cache);
CHECK(string != nullptr) << "Could not allocate a string when forcing determinism";
+ if (only_startup_strings) {
+ dex_cache->GetPreResolvedStrings()[string_index.index_] =
+ GcRoot<mirror::String>(string);
+ }
+ ++num_instructions;
break;
}
@@ -750,6 +779,7 @@ static void ResolveConstStrings(CompilerDriver* driver,
}
}
}
+ VLOG(compiler) << "Resolved " << num_instructions << " const string instructions";
}
// Initialize type check bit strings for check-cast and instance-of in the code. Done to have
@@ -778,7 +808,7 @@ static void InitializeTypeCheckBitstrings(CompilerDriver* driver,
ObjPtr<mirror::Class> klass =
class_linker->LookupResolvedType(type_index,
dex_cache.Get(),
- /* class_loader */ nullptr);
+ /* class_loader= */ nullptr);
CHECK(klass != nullptr) << descriptor << " should have been previously resolved.";
// Now assign the bitstring if the class is not final. Keep this in sync with sharpening.
if (!klass->IsFinal()) {
@@ -808,12 +838,6 @@ static void InitializeTypeCheckBitstrings(CompilerDriver* driver,
TimingLogger::ScopedTiming t("Initialize type check bitstrings", timings);
for (ClassAccessor accessor : dex_file->GetClasses()) {
- if (!driver->IsClassToCompile(accessor.GetDescriptor())) {
- // Compilation is skipped, do not look for type checks in code of this class.
- // FIXME: Make sure that inlining honors this. b/26687569
- continue;
- }
-
// Direct and virtual methods.
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
InitializeTypeCheckBitstrings(driver, class_linker, dex_cache, *dex_file, method);
@@ -852,7 +876,9 @@ static void EnsureVerifiedOrVerifyAtRuntime(jobject jclass_loader,
void CompilerDriver::PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
+ TimingLogger* timings,
+ /*inout*/ HashSet<std::string>* image_classes,
+ /*out*/ VerificationResults* verification_results) {
CheckThreadPools();
VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
@@ -869,7 +895,7 @@ void CompilerDriver::PreCompile(jobject class_loader,
// 6) Update the set of image classes.
// 7) For deterministic boot image, initialize bitstrings for type checking.
- LoadImageClasses(timings);
+ LoadImageClasses(timings, image_classes);
VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false);
if (compiler_options_->IsAnyCompilationEnabled()) {
@@ -897,11 +923,13 @@ void CompilerDriver::PreCompile(jobject class_loader,
if (GetCompilerOptions().IsForceDeterminism() && GetCompilerOptions().IsBootImage()) {
// Resolve strings from const-string. Do this now to have a deterministic image.
- ResolveConstStrings(this, dex_files, timings);
+ ResolveConstStrings(dex_files, /*only_startup_strings=*/ false, timings);
VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false);
+ } else if (GetCompilerOptions().ResolveStartupConstStrings()) {
+ ResolveConstStrings(dex_files, /*only_startup_strings=*/ true, timings);
}
- Verify(class_loader, dex_files, timings);
+ Verify(class_loader, dex_files, timings, verification_results);
VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
@@ -926,7 +954,7 @@ void CompilerDriver::PreCompile(jobject class_loader,
VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
}
- UpdateImageClasses(timings);
+ UpdateImageClasses(timings, image_classes);
VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString(false);
if (kBitstringSubtypeCheckEnabled &&
@@ -938,13 +966,6 @@ void CompilerDriver::PreCompile(jobject class_loader,
}
}
-bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
- if (classes_to_compile_ == nullptr) {
- return true;
- }
- return classes_to_compile_->find(StringPiece(descriptor)) != classes_to_compile_->end();
-}
-
bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const {
// Profile compilation info may be null if no profile is passed.
if (!CompilerFilter::DependsOnProfile(compiler_options_->GetCompilerFilter())) {
@@ -953,12 +974,14 @@ bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_r
return true;
}
// If we are using a profile filter but do not have a profile compilation info, compile nothing.
- if (profile_compilation_info_ == nullptr) {
+ const ProfileCompilationInfo* profile_compilation_info =
+ GetCompilerOptions().GetProfileCompilationInfo();
+ if (profile_compilation_info == nullptr) {
return false;
}
// Compile only hot methods; it is the profile saver's job to decide what startup methods to mark
// as hot.
- bool result = profile_compilation_info_->GetMethodHotness(method_ref).IsHot();
+ bool result = profile_compilation_info->GetMethodHotness(method_ref).IsHot();
if (kDebugProfileGuidedCompilation) {
LOG(INFO) << "[ProfileGuidedCompilation] "
@@ -1045,7 +1068,8 @@ class RecordImageClassesVisitor : public ClassVisitor {
};
// Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
+void CompilerDriver::LoadImageClasses(TimingLogger* timings,
+ /*inout*/ HashSet<std::string>* image_classes) {
CHECK(timings != nullptr);
if (!GetCompilerOptions().IsBootImage()) {
return;
@@ -1056,15 +1080,15 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- CHECK(image_classes_ != nullptr);
- for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
+ CHECK(image_classes != nullptr);
+ for (auto it = image_classes->begin(), end = image_classes->end(); it != end;) {
const std::string& descriptor(*it);
StackHandleScope<1> hs(self);
Handle<mirror::Class> klass(
hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
if (klass == nullptr) {
VLOG(compiler) << "Failed to find class " << descriptor;
- it = image_classes_->erase(it);
+ it = image_classes->erase(it);
self->ClearException();
} else {
++it;
@@ -1101,7 +1125,7 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
ScopedNullHandle<mirror::ClassLoader>())
: nullptr;
if (klass == nullptr) {
- const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
+ const dex::TypeId& type_id = dex_file->GetTypeId(exception_type_idx);
const char* descriptor = dex_file->GetTypeDescriptor(type_id);
LOG(FATAL) << "Failed to resolve class " << descriptor;
}
@@ -1114,10 +1138,10 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
// We walk the roots looking for classes so that we'll pick up the
// above classes plus any classes they depend on, such as super
// classes, interfaces, and the required ClassLinker roots.
- RecordImageClassesVisitor visitor(image_classes_);
+ RecordImageClassesVisitor visitor(image_classes);
class_linker->VisitClasses(&visitor);
- CHECK(!image_classes_->empty());
+ CHECK(!image_classes->empty());
}
static void MaybeAddToImageClasses(Thread* self,
@@ -1146,7 +1170,7 @@ static void MaybeAddToImageClasses(Thread* self,
if (klass->IsArrayClass()) {
MaybeAddToImageClasses(self, klass->GetComponentType(), image_classes);
}
- klass.Assign(klass->GetSuperClass());
+ klass = klass->GetSuperClass();
}
}
@@ -1173,7 +1197,7 @@ class ClinitImageUpdate {
// Visitor for VisitReferences.
void operator()(ObjPtr<mirror::Object> object,
MemberOffset field_offset,
- bool /* is_static */) const
+ bool is_static ATTRIBUTE_UNUSED) const
REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
if (ref != nullptr) {
@@ -1213,8 +1237,15 @@ class ClinitImageUpdate {
bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
StringPiece name(klass->GetDescriptor(&temp));
- if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
- data_->image_classes_.push_back(hs_.NewHandle(klass));
+ auto it = data_->image_class_descriptors_->find(name);
+ if (it != data_->image_class_descriptors_->end()) {
+ if (LIKELY(klass->IsResolved())) {
+ data_->image_classes_.push_back(hs_.NewHandle(klass));
+ } else {
+ DCHECK(klass->IsErroneousUnresolved());
+ VLOG(compiler) << "Removing unresolved class from image classes: " << name;
+ data_->image_class_descriptors_->erase(it);
+ }
} else {
// Check whether it is initialized and has a clinit. They must be kept, too.
if (klass->IsInitialized() && klass->FindClassInitializer(
@@ -1286,7 +1317,8 @@ class ClinitImageUpdate {
DISALLOW_COPY_AND_ASSIGN(ClinitImageUpdate);
};
-void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
+void CompilerDriver::UpdateImageClasses(TimingLogger* timings,
+ /*inout*/ HashSet<std::string>* image_classes) {
if (GetCompilerOptions().IsBootImage()) {
TimingLogger::ScopedTiming t("UpdateImageClasses", timings);
@@ -1298,7 +1330,7 @@ void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
VariableSizedHandleScope hs(Thread::Current());
std::string error_msg;
std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(hs,
- image_classes_,
+ image_classes,
Thread::Current(),
runtime->GetClassLinker()));
@@ -1335,7 +1367,7 @@ ArtField* CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx,
Handle<mirror::DexCache> dex_cache(mUnit->GetDexCache());
{
Handle<mirror::ClassLoader> class_loader = mUnit->GetClassLoader();
- resolved_field = ResolveField(soa, dex_cache, class_loader, field_idx, /* is_static */ false);
+ resolved_field = ResolveField(soa, dex_cache, class_loader, field_idx, /* is_static= */ false);
referrer_class = resolved_field != nullptr
? ResolveCompilingMethodsClass(soa, dex_cache, class_loader, mUnit) : nullptr;
}
@@ -1367,12 +1399,6 @@ bool CompilerDriver::ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompi
}
}
-const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file,
- uint32_t method_idx) const {
- MethodReference ref(dex_file, method_idx);
- return verification_results_->GetVerifiedMethod(ref);
-}
-
bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc) {
if (!compiler_options_->IsVerificationEnabled()) {
// If we didn't verify, every cast has to be treated as non-safe.
@@ -1551,18 +1577,6 @@ static void CheckAndClearResolveException(Thread* self)
self->ClearException();
}
-bool CompilerDriver::RequiresConstructorBarrier(const DexFile& dex_file,
- uint16_t class_def_idx) const {
- ClassAccessor accessor(dex_file, class_def_idx);
- // We require a constructor barrier if there are final instance fields.
- for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
- if (field.IsFinal()) {
- return true;
- }
- }
- return false;
-}
-
class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
public:
explicit ResolveClassFieldsAndMethodsVisitor(const ParallelCompilationManager* manager)
@@ -1582,7 +1596,7 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
// needs it, here we try to resolve fields and methods used in class
// definitions, since many of them may never be referenced by
// generated code.
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
ScopedObjectAccess soa(self);
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
@@ -1606,57 +1620,42 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
// We want to resolve the methods and fields eagerly.
resolve_fields_and_methods = true;
}
- // If an instance field is final then we need to have a barrier on the return, static final
- // fields are assigned within the lock held for class initialization.
- bool requires_constructor_barrier = false;
- ClassAccessor accessor(dex_file, class_def_index);
- // Optionally resolve fields and methods and figure out if we need a constructor barrier.
- auto method_visitor = [&](const ClassAccessor::Method& method)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (resolve_fields_and_methods) {
+ if (resolve_fields_and_methods) {
+ ClassAccessor accessor(dex_file, class_def_index);
+ // Optionally resolve fields and methods and figure out if we need a constructor barrier.
+ auto method_visitor = [&](const ClassAccessor::Method& method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* resolved = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
method.GetIndex(),
dex_cache,
class_loader,
- /* referrer */ nullptr,
+ /*referrer=*/ nullptr,
method.GetInvokeType(class_def.access_flags_));
if (resolved == nullptr) {
CheckAndClearResolveException(soa.Self());
}
- }
- };
- accessor.VisitFieldsAndMethods(
- // static fields
- [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (resolve_fields_and_methods) {
+ };
+ accessor.VisitFieldsAndMethods(
+ // static fields
+ [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* resolved = class_linker->ResolveField(
- field.GetIndex(), dex_cache, class_loader, /* is_static */ true);
+ field.GetIndex(), dex_cache, class_loader, /*is_static=*/ true);
if (resolved == nullptr) {
CheckAndClearResolveException(soa.Self());
}
- }
- },
- // instance fields
- [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (field.IsFinal()) {
- // We require a constructor barrier if there are final instance fields.
- requires_constructor_barrier = true;
- }
- if (resolve_fields_and_methods) {
+ },
+ // instance fields
+ [&](ClassAccessor::Field& field) REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* resolved = class_linker->ResolveField(
- field.GetIndex(), dex_cache, class_loader, /* is_static */ false);
+ field.GetIndex(), dex_cache, class_loader, /*is_static=*/ false);
if (resolved == nullptr) {
CheckAndClearResolveException(soa.Self());
}
- }
- },
- /*direct methods*/ method_visitor,
- /*virtual methods*/ method_visitor);
- manager_->GetCompiler()->SetRequiresConstructorBarrier(self,
- &dex_file,
- class_def_index,
- requires_constructor_barrier);
+ },
+ /*direct_method_visitor=*/ method_visitor,
+ /*virtual_method_visitor=*/ method_visitor);
+ }
}
private:
@@ -1756,6 +1755,9 @@ static void LoadAndUpdateStatus(const ClassAccessor& accessor,
if (&cls->GetDexFile() == &accessor.GetDexFile()) {
ObjectLock<mirror::Class> lock(self, cls);
mirror::Class::SetStatus(cls, status, self);
+ if (status >= ClassStatus::kVerified) {
+ cls->SetVerificationAttempted();
+ }
}
} else {
DCHECK(self->IsExceptionPending());
@@ -1763,9 +1765,46 @@ static void LoadAndUpdateStatus(const ClassAccessor& accessor,
}
}
+// Returns true if any of the given dex files define a class from the boot classpath.
+static bool DexFilesRedefineBootClasses(
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings) {
+ TimingLogger::ScopedTiming t("Fast Verify: Boot Class Redefinition Check", timings);
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ bool foundRedefinition = false;
+ for (const DexFile* dex_file : dex_files) {
+ for (ClassAccessor accessor : dex_file->GetClasses()) {
+ const char* descriptor = accessor.GetDescriptor();
+ StackHandleScope<1> hs_class(self);
+ Handle<mirror::Class> klass =
+ hs_class.NewHandle(class_linker->FindSystemClass(self, descriptor));
+ if (klass == nullptr) {
+ self->ClearException();
+ } else {
+ LOG(WARNING) << "Redefinition of boot class " << descriptor
+ << " App dex file: " << accessor.GetDexFile().GetLocation()
+ << " Boot dex file: " << klass->GetDexFile().GetLocation();
+ foundRedefinition = true;
+ if (!VLOG_IS_ON(verifier)) {
+ // If we are not in verbose mode, return early.
+ // Otherwise continue and log all the collisions for easier debugging.
+ return true;
+ }
+ }
+ }
+ }
+
+ return foundRedefinition;
+}
+
bool CompilerDriver::FastVerify(jobject jclass_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
+ TimingLogger* timings,
+ /*out*/ VerificationResults* verification_results) {
verifier::VerifierDeps* verifier_deps =
Runtime::Current()->GetCompilerCallbacks()->GetVerifierDeps();
// If there exist VerifierDeps that aren't the ones we just created to output, use them to verify.
@@ -1773,6 +1812,17 @@ bool CompilerDriver::FastVerify(jobject jclass_loader,
return false;
}
TimingLogger::ScopedTiming t("Fast Verify", timings);
+
+ // We cannot do fast verification if the app redefines classes from the boot classpath.
+ // Vdex does not record resolution chains for boot classes and we might wrongfully
+ // resolve a class to the app when it should have been resolved to the boot classpath
+ // (e.g. if we verified against the SDK and the app redefines a boot class which is not
+ // in the SDK.)
+ if (DexFilesRedefineBootClasses(dex_files, timings)) {
+ LOG(WARNING) << "Found redefinition of boot classes. Not doing fast verification.";
+ return false;
+ }
+
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<2> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
@@ -1813,7 +1863,7 @@ bool CompilerDriver::FastVerify(jobject jclass_loader,
// - Quickening will not do checkcast elision.
// TODO(ngeoffray): Reconsider this once we refactor compiler filters.
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
- verification_results_->CreateVerifiedMethodFor(method.GetReference());
+ verification_results->CreateVerifiedMethodFor(method.GetReference());
}
}
} else if (!compiler_only_verifies) {
@@ -1831,8 +1881,9 @@ bool CompilerDriver::FastVerify(jobject jclass_loader,
void CompilerDriver::Verify(jobject jclass_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
- if (FastVerify(jclass_loader, dex_files, timings)) {
+ TimingLogger* timings,
+ /*out*/ VerificationResults* verification_results) {
+ if (FastVerify(jclass_loader, dex_files, timings, verification_results)) {
return;
}
@@ -1894,7 +1945,7 @@ class VerifyClassVisitor : public CompilationVisitor {
ScopedTrace trace(__FUNCTION__);
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = manager_->GetClassLinker();
jobject jclass_loader = manager_->GetClassLoader();
@@ -2028,7 +2079,7 @@ class SetVerifiedClassVisitor : public CompilationVisitor {
ScopedTrace trace(__FUNCTION__);
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *manager_->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ClassLinker* class_linker = manager_->GetClassLinker();
jobject jclass_loader = manager_->GetClassLoader();
@@ -2093,8 +2144,8 @@ class InitializeClassVisitor : public CompilationVisitor {
ScopedTrace trace(__FUNCTION__);
jobject jclass_loader = manager_->GetClassLoader();
const DexFile& dex_file = *manager_->GetDexFile();
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::TypeId& class_type_id = dex_file.GetTypeId(class_def.class_idx_);
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccess soa(Thread::Current());
@@ -2104,8 +2155,11 @@ class InitializeClassVisitor : public CompilationVisitor {
Handle<mirror::Class> klass(
hs.NewHandle(manager_->GetClassLinker()->FindClass(soa.Self(), descriptor, class_loader)));
- if (klass != nullptr && !SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
- TryInitializeClass(klass, class_loader);
+ if (klass != nullptr) {
+ if (!SkipClass(manager_->GetClassLoader(), dex_file, klass.Get())) {
+ TryInitializeClass(klass, class_loader);
+ }
+ manager_->GetCompiler()->stats_->AddClassStatus(klass->GetStatus());
}
// Clear any class not found or verification exceptions.
soa.Self()->ClearException();
@@ -2115,8 +2169,8 @@ class InitializeClassVisitor : public CompilationVisitor {
void TryInitializeClass(Handle<mirror::Class> klass, Handle<mirror::ClassLoader>& class_loader)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass->GetDexFile();
- const DexFile::ClassDef* class_def = klass->GetClassDef();
- const DexFile::TypeId& class_type_id = dex_file.GetTypeId(class_def->class_idx_);
+ const dex::ClassDef* class_def = klass->GetClassDef();
+ const dex::TypeId& class_type_id = dex_file.GetTypeId(class_def->class_idx_);
const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<3> hs(soa.Self());
@@ -2157,10 +2211,9 @@ class InitializeClassVisitor : public CompilationVisitor {
// Otherwise it's in the app image but superclasses can't be initialized; no need to proceed.
old_status = klass->GetStatus();
- bool too_many_encoded_fields = false;
- if (!is_boot_image && klass->NumStaticFields() > kMaxEncodedFields) {
- too_many_encoded_fields = true;
- }
+ bool too_many_encoded_fields = !is_boot_image &&
+ klass->NumStaticFields() > kMaxEncodedFields;
+
// If the class was not initialized, we can proceed to see if we can initialize static
// fields. Limit the max number of encoded fields.
if (!klass->IsInitialized() &&
@@ -2210,9 +2263,13 @@ class InitializeClassVisitor : public CompilationVisitor {
if (success) {
runtime->ExitTransactionMode();
DCHECK(!runtime->IsActiveTransaction());
- }
- if (!success) {
+ if (is_boot_image) {
+ // For boot image, we want to put the updated status in the oat class since we
+ // can't reject the image anyways.
+ old_status = klass->GetStatus();
+ }
+ } else {
CHECK(soa.Self()->IsExceptionPending());
mirror::Throwable* exception = soa.Self()->GetException();
VLOG(compiler) << "Initialization of " << descriptor << " aborted because of "
@@ -2226,10 +2283,6 @@ class InitializeClassVisitor : public CompilationVisitor {
soa.Self()->ClearException();
runtime->RollbackAllTransactions();
CHECK_EQ(old_status, klass->GetStatus()) << "Previous class status not restored";
- } else if (is_boot_image) {
- // For boot image, we want to put the updated status in the oat class since we can't
- // reject the image anyways.
- old_status = klass->GetStatus();
}
}
@@ -2272,7 +2325,7 @@ class InitializeClassVisitor : public CompilationVisitor {
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache = hs.NewHandle(klass->GetDexCache());
- const DexFile::ClassDef* class_def = klass->GetClassDef();
+ const dex::ClassDef* class_def = klass->GetClassDef();
ClassLinker* class_linker = manager_->GetClassLinker();
// Check encoded final field values for strings and intern.
@@ -2314,7 +2367,7 @@ class InitializeClassVisitor : public CompilationVisitor {
self->ClearException();
return false;
}
- const DexFile::TypeList* types = m->GetParameterTypeList();
+ const dex::TypeList* types = m->GetParameterTypeList();
if (types != nullptr) {
for (uint32_t i = 0; i < types->Size(); ++i) {
dex::TypeIndex param_type_idx = types->GetTypeItem(i).type_idx_;
@@ -2532,7 +2585,7 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
// SetVerificationAttempted so that the access flags are set. If we do not do this they get
// changed at runtime resulting in more dirty image pages.
// Also create conflict tables.
- // Only useful if we are compiling an image (image_classes_ is not null).
+ // Only useful if we are compiling an image.
ScopedObjectAccess soa(Thread::Current());
VariableSizedHandleScope hs(soa.Self());
InitializeArrayClassesAndCreateConflictTablesVisitor visitor(hs);
@@ -2541,7 +2594,7 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
}
if (GetCompilerOptions().IsBootImage()) {
// Prune garbage objects created during aborted transactions.
- Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references */ true);
+ Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ true);
}
}
@@ -2564,15 +2617,16 @@ static void CompileDexFile(CompilerDriver* driver,
thread_pool);
auto compile = [&context, &compile_fn](size_t class_def_index) {
- ScopedTrace trace(__FUNCTION__);
const DexFile& dex_file = *context.GetDexFile();
+ SCOPED_TRACE << "compile " << dex_file.GetLocation() << "@" << class_def_index;
ClassLinker* class_linker = context.GetClassLinker();
jobject jclass_loader = context.GetClassLoader();
ClassReference ref(&dex_file, class_def_index);
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
ClassAccessor accessor(dex_file, class_def_index);
+ CompilerDriver* const driver = context.GetCompiler();
// Skip compiling classes with generic verifier failures since they will still fail at runtime
- if (context.GetCompiler()->GetVerificationResults()->IsClassRejected(ref)) {
+ if (driver->GetCompilerOptions().GetVerificationResults()->IsClassRejected(ref)) {
return;
}
// Use a scoped object access to perform to the quick SkipClass check.
@@ -2604,15 +2658,10 @@ static void CompileDexFile(CompilerDriver* driver,
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
- CompilerDriver* const driver = context.GetCompiler();
-
    // Can we run the DEX-to-DEX compiler on this class?
optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level =
GetDexToDexCompilationLevel(soa.Self(), *driver, jclass_loader, dex_file, class_def);
-
- const bool compilation_enabled = driver->IsClassToCompile(accessor.GetDescriptor());
-
// Compile direct and virtual methods.
int64_t previous_method_idx = -1;
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
@@ -2633,7 +2682,6 @@ static void CompileDexFile(CompilerDriver* driver,
class_loader,
dex_file,
dex_to_dex_compilation_level,
- compilation_enabled,
dex_cache);
}
};
@@ -2644,10 +2692,12 @@ void CompilerDriver::Compile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings) {
if (kDebugProfileGuidedCompilation) {
+ const ProfileCompilationInfo* profile_compilation_info =
+ GetCompilerOptions().GetProfileCompilationInfo();
LOG(INFO) << "[ProfileGuidedCompilation] " <<
- ((profile_compilation_info_ == nullptr)
+ ((profile_compilation_info == nullptr)
? "null"
- : profile_compilation_info_->DumpInfo(&dex_files));
+ : profile_compilation_info->DumpInfo(dex_files));
}
dex_to_dex_compiler_.ClearState();
@@ -2779,56 +2829,6 @@ CompiledMethod* CompilerDriver::GetCompiledMethod(MethodReference ref) const {
return compiled_method;
}
-bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
- uint16_t class_def_idx,
- const DexFile& dex_file) const {
- const VerifiedMethod* verified_method = GetVerifiedMethod(&dex_file, method_idx);
- if (verified_method != nullptr) {
- return !verified_method->HasVerificationFailures();
- }
-
- // If we can't find verification metadata, check if this is a system class (we trust that system
- // classes have their methods verified). If it's not, be conservative and assume the method
- // has not been verified successfully.
-
- // TODO: When compiling the boot image it should be safe to assume that everything is verified,
- // even if methods are not found in the verification cache.
- const char* descriptor = dex_file.GetClassDescriptor(dex_file.GetClassDef(class_def_idx));
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
- bool is_system_class = class_linker->FindSystemClass(self, descriptor) != nullptr;
- if (!is_system_class) {
- self->ClearException();
- }
- return is_system_class;
-}
-
-void CompilerDriver::SetRequiresConstructorBarrier(Thread* self,
- const DexFile* dex_file,
- uint16_t class_def_index,
- bool requires) {
- WriterMutexLock mu(self, requires_constructor_barrier_lock_);
- requires_constructor_barrier_.emplace(ClassReference(dex_file, class_def_index), requires);
-}
-
-bool CompilerDriver::RequiresConstructorBarrier(Thread* self,
- const DexFile* dex_file,
- uint16_t class_def_index) {
- ClassReference class_ref(dex_file, class_def_index);
- {
- ReaderMutexLock mu(self, requires_constructor_barrier_lock_);
- auto it = requires_constructor_barrier_.find(class_ref);
- if (it != requires_constructor_barrier_.end()) {
- return it->second;
- }
- }
- WriterMutexLock mu(self, requires_constructor_barrier_lock_);
- const bool requires = RequiresConstructorBarrier(*dex_file, class_def_index);
- requires_constructor_barrier_.emplace(class_ref, requires);
- return requires;
-}
-
std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
std::ostringstream oss;
const gc::Heap* const heap = Runtime::Current()->GetHeap();
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 343f67c6d5..6f8ec125bc 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -36,7 +36,6 @@
#include "class_status.h"
#include "compiler.h"
#include "dex/class_reference.h"
-#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "dex/dex_to_dex_compiler.h"
#include "dex/method_reference.h"
@@ -47,6 +46,10 @@
namespace art {
+namespace dex {
+struct CodeItem;
+} // namespace dex
+
namespace mirror {
class Class;
class DexCache;
@@ -62,6 +65,7 @@ class BitVector;
class CompiledMethod;
class CompilerOptions;
class DexCompilationUnit;
+class DexFile;
template<class T> class Handle;
struct InlineIGetIPutData;
class InstructionSetFeatures;
@@ -76,7 +80,6 @@ template <class Allocator> class SrcMap;
class TimingLogger;
class VdexFile;
class VerificationResults;
-class VerifiedMethod;
enum EntryPointCallingConvention {
// ABI of invocations to a method's interpreter entry point.
@@ -95,18 +98,26 @@ class CompilerDriver {
// can assume will be in the image, with null implying all available
// classes.
CompilerDriver(const CompilerOptions* compiler_options,
- VerificationResults* verification_results,
Compiler::Kind compiler_kind,
- HashSet<std::string>* image_classes,
size_t thread_count,
- int swap_fd,
- const ProfileCompilationInfo* profile_compilation_info);
+ int swap_fd);
~CompilerDriver();
// Set dex files classpath.
void SetClasspathDexFiles(const std::vector<const DexFile*>& dex_files);
+ // Initialize and destroy thread pools. This is exposed because we do not want
+ // to do this twice, for PreCompile() and CompileAll().
+ void InitializeThreadPools();
+ void FreeThreadPools();
+
+ void PreCompile(jobject class_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings,
+ /*inout*/ HashSet<std::string>* image_classes,
+ /*out*/ VerificationResults* verification_results)
+ REQUIRES(!Locks::mutator_lock_);
void CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings)
@@ -120,13 +131,11 @@ class CompilerDriver {
uint32_t method_idx,
uint32_t access_flags,
InvokeType invoke_type,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> h_class_loader)
REQUIRES(!Locks::mutator_lock_);
- VerificationResults* GetVerificationResults() const;
-
const CompilerOptions& GetCompilerOptions() const {
return *compiler_options_;
}
@@ -146,55 +155,10 @@ class CompilerDriver {
bool GetCompiledClass(const ClassReference& ref, ClassStatus* status) const;
CompiledMethod* GetCompiledMethod(MethodReference ref) const;
- size_t GetNonRelativeLinkerPatchCount() const;
// Add a compiled method.
void AddCompiledMethod(const MethodReference& method_ref, CompiledMethod* const compiled_method);
CompiledMethod* RemoveCompiledMethod(const MethodReference& method_ref);
- void SetRequiresConstructorBarrier(Thread* self,
- const DexFile* dex_file,
- uint16_t class_def_index,
- bool requires)
- REQUIRES(!requires_constructor_barrier_lock_);
-
- // Do the <init> methods for this class require a constructor barrier (prior to the return)?
- // The answer is "yes", if and only if this class has any instance final fields.
- // (This must not be called for any non-<init> methods; the answer would be "no").
- //
- // ---
- //
- // JLS 17.5.1 "Semantics of final fields" mandates that all final fields are frozen at the end
- // of the invoked constructor. The constructor barrier is a conservative implementation means of
- // enforcing the freezes happen-before the object being constructed is observable by another
- // thread.
- //
- // Note: This question only makes sense for instance constructors;
- // static constructors (despite possibly having finals) never need
- // a barrier.
- //
- // JLS 12.4.2 "Detailed Initialization Procedure" approximately describes
- // class initialization as:
- //
- // lock(class.lock)
- // class.state = initializing
- // unlock(class.lock)
- //
- // invoke <clinit>
- //
- // lock(class.lock)
- // class.state = initialized
- // unlock(class.lock) <-- acts as a release
- //
- // The last operation in the above example acts as an atomic release
- // for any stores in <clinit>, which ends up being stricter
- // than what a constructor barrier needs.
- //
- // See also QuasiAtomic::ThreadFenceForConstructor().
- bool RequiresConstructorBarrier(Thread* self,
- const DexFile* dex_file,
- uint16_t class_def_index)
- REQUIRES(!requires_constructor_barrier_lock_);
-
// Resolve compiling method's class. Returns null on failure.
ObjPtr<mirror::Class> ResolveCompilingMethodsClass(const ScopedObjectAccess& soa,
Handle<mirror::DexCache> dex_cache,
@@ -225,16 +189,6 @@ class CompilerDriver {
uint16_t field_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Resolve a method. Returns null on failure, including incompatible class change.
- ArtMethod* ResolveMethod(
- ScopedObjectAccess& soa,
- Handle<mirror::DexCache> dex_cache,
- Handle<mirror::ClassLoader> class_loader,
- const DexCompilationUnit* mUnit,
- uint32_t method_idx,
- InvokeType invoke_type)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
void ProcessedInstanceField(bool resolved);
void ProcessedStaticField(bool resolved, bool local);
@@ -250,17 +204,8 @@ class CompilerDriver {
REQUIRES_SHARED(Locks::mutator_lock_);
- const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc);
- void SetCompilerContext(void* compiler_context) {
- compiler_context_ = compiler_context;
- }
-
- void* GetCompilerContext() const {
- return compiler_context_;
- }
-
size_t GetThreadCount() const {
return parallel_thread_count_;
}
@@ -273,9 +218,6 @@ class CompilerDriver {
return compiled_method_storage_.DedupeEnabled();
}
- // Checks whether the provided class should be compiled, i.e., is in classes_to_compile_.
- bool IsClassToCompile(const char* descriptor) const;
-
// Checks whether profile guided compilation is enabled and if the method should be compiled
// according to the profile file.
bool ShouldCompileBasedOnProfile(const MethodReference& method_ref) const;
@@ -286,12 +228,6 @@ class CompilerDriver {
void RecordClassStatus(const ClassReference& ref, ClassStatus status);
- // Checks if the specified method has been verified without failures. Returns
- // false if the method is not in the verification results (GetVerificationResults).
- bool IsMethodVerifiedWithoutFailures(uint32_t method_idx,
- uint16_t class_def_idx,
- const DexFile& dex_file) const;
-
// Get memory usage during compilation.
std::string GetMemoryUsageString(bool extended) const;
@@ -310,10 +246,6 @@ class CompilerDriver {
return &compiled_method_storage_;
}
- const ProfileCompilationInfo* GetProfileCompilationInfo() const {
- return profile_compilation_info_;
- }
-
// Is `boot_image_filename` the name of a core image (small boot
// image used for ART testing only)?
static bool IsCoreImageFilename(const std::string& boot_image_filename) {
@@ -336,13 +268,9 @@ class CompilerDriver {
}
private:
- void PreCompile(jobject class_loader,
- const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings)
+ void LoadImageClasses(TimingLogger* timings, /*inout*/ HashSet<std::string>* image_classes)
REQUIRES(!Locks::mutator_lock_);
- void LoadImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
-
// Attempt to resolve all type, methods, fields, and strings
// referenced from code in the dex file following PathClassLoader
// ordering semantics.
@@ -362,11 +290,13 @@ class CompilerDriver {
// verification was successful.
bool FastVerify(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings);
+ TimingLogger* timings,
+ /*out*/ VerificationResults* verification_results);
void Verify(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings);
+ TimingLogger* timings,
+ /*out*/ VerificationResults* verification_results);
void VerifyDexFile(jobject class_loader,
const DexFile& dex_file,
@@ -397,30 +327,26 @@ class CompilerDriver {
TimingLogger* timings)
REQUIRES(!Locks::mutator_lock_);
- void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
+ void UpdateImageClasses(TimingLogger* timings, /*inout*/ HashSet<std::string>* image_classes)
+ REQUIRES(!Locks::mutator_lock_);
void Compile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings);
- void InitializeThreadPools();
- void FreeThreadPools();
void CheckThreadPools();
- bool RequiresConstructorBarrier(const DexFile& dex_file, uint16_t class_def_idx) const;
+ // Resolve const string literals that are loaded from dex code. If only_startup_strings is
+ // specified, only methods that are marked startup in the profile are resolved.
+ void ResolveConstStrings(const std::vector<const DexFile*>& dex_files,
+ bool only_startup_strings,
+ /*inout*/ TimingLogger* timings);
const CompilerOptions* const compiler_options_;
- VerificationResults* const verification_results_;
std::unique_ptr<Compiler> compiler_;
Compiler::Kind compiler_kind_;
- // All class references that require constructor barriers. If the class reference is not in the
- // set then the result has not yet been computed.
- mutable ReaderWriterMutex requires_constructor_barrier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::map<ClassReference, bool> requires_constructor_barrier_
- GUARDED_BY(requires_constructor_barrier_lock_);
-
// All class references that this compiler has compiled. Indexed by class defs.
using ClassStateTable = AtomicDexRefMap<ClassReference, ClassStatus>;
ClassStateTable compiled_classes_;
@@ -429,21 +355,9 @@ class CompilerDriver {
typedef AtomicDexRefMap<MethodReference, CompiledMethod*> MethodTable;
- private:
// All method references that this compiler has compiled.
MethodTable compiled_methods_;
- // Image classes to be updated by PreCompile().
- // TODO: Remove this member which is a non-const pointer to the CompilerOptions' data.
- // Pass this explicitly to the PreCompile() which should be called directly from
- // Dex2Oat rather than implicitly by CompileAll().
- HashSet<std::string>* image_classes_;
-
- // Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
- // all classes are eligible for compilation (duplication filters etc. will still apply).
- // This option may be restricted to the boot image, depending on a flag in the implementation.
- std::unique_ptr<HashSet<std::string>> classes_to_compile_;
-
std::atomic<uint32_t> number_of_soft_verifier_failures_;
bool had_hard_verifier_failure_;
@@ -458,16 +372,8 @@ class CompilerDriver {
class AOTCompilationStats;
std::unique_ptr<AOTCompilationStats> stats_;
- typedef void (*CompilerCallbackFn)(CompilerDriver& driver);
- typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver);
-
- void* compiler_context_;
-
CompiledMethodStorage compiled_method_storage_;
- // Info for profile guided compilation.
- const ProfileCompilationInfo* const profile_compilation_info_;
-
size_t max_arena_alloc_;
// Compiler for dex to dex (quickening).
@@ -476,6 +382,7 @@ class CompilerDriver {
friend class CommonCompilerTest;
friend class CompileClassVisitor;
friend class DexToDexDecompilerTest;
+ friend class InitializeClassVisitor;
friend class verifier::VerifierDepsTest;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
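
A minimal caller-side sketch of the reshaped driver API above (not part of this change; the real orchestration lives in dex2oat). Only the signatures visible in this header are used; the wiring and lifetime of the arguments are assumptions.

  // Hypothetical caller. Verification results, image classes and the profile no longer
  // live inside the driver; they are passed in or reached through CompilerOptions.
  void CompileWithExplicitState(const CompilerOptions* compiler_options,
                                jobject class_loader,
                                const std::vector<const DexFile*>& dex_files,
                                TimingLogger* timings,
                                /*inout*/ HashSet<std::string>* image_classes,
                                /*out*/ VerificationResults* verification_results) {
    CompilerDriver driver(compiler_options,
                          Compiler::kOptimizing,
                          /* thread_count= */ 1,
                          /* swap_fd= */ -1);
    // InitializeThreadPools()/FreeThreadPools() are exposed so the setup need not happen
    // twice across PreCompile() and CompileAll(); pool management is elided here.
    driver.PreCompile(class_loader, dex_files, timings, image_classes, verification_results);
    driver.CompileAll(class_loader, dex_files, timings);
  }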
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index fe1568da83..e73d0724c9 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -42,20 +42,18 @@ namespace art {
class CompilerDriverTest : public CommonCompilerTest {
protected:
- void CompileAll(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
- TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
- TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
+ void CompileAllAndMakeExecutable(jobject class_loader) REQUIRES(!Locks::mutator_lock_) {
+ TimingLogger timings("CompilerDriverTest::CompileAllAndMakeExecutable", false, false);
dex_files_ = GetDexFiles(class_loader);
- SetDexFilesForOatFile(dex_files_);
- compiler_driver_->CompileAll(class_loader, dex_files_, &timings);
- t.NewTiming("MakeAllExecutable");
+ CompileAll(class_loader, dex_files_, &timings);
+ TimingLogger::ScopedTiming t("MakeAllExecutable", &timings);
MakeAllExecutable(class_loader);
}
void EnsureCompiled(jobject class_loader, const char* class_name, const char* method,
const char* signature, bool is_virtual)
REQUIRES(!Locks::mutator_lock_) {
- CompileAll(class_loader);
+ CompileAllAndMakeExecutable(class_loader);
Thread::Current()->TransitionFromSuspendedToRunnable();
bool started = runtime_->Start();
CHECK(started);
@@ -82,7 +80,7 @@ class CompilerDriverTest : public CommonCompilerTest {
void MakeDexFileExecutable(jobject class_loader, const DexFile& dex_file) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
for (size_t i = 0; i < dex_file.NumClassDefs(); i++) {
- const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+ const dex::ClassDef& class_def = dex_file.GetClassDef(i);
const char* descriptor = dex_file.GetClassDescriptor(class_def);
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
@@ -106,7 +104,7 @@ class CompilerDriverTest : public CommonCompilerTest {
// Disabled due to 10 second runtime on host
// TODO: Update the test for hash-based dex cache arrays. Bug: 30627598
TEST_F(CompilerDriverTest, DISABLED_LARGE_CompileDexLibCore) {
- CompileAll(nullptr);
+ CompileAllAndMakeExecutable(nullptr);
// All libcore references should resolve
ScopedObjectAccess soa(Thread::Current());
@@ -266,7 +264,7 @@ TEST_F(CompilerDriverProfileTest, ProfileGuidedCompilation) {
ASSERT_TRUE(dex_file->EnableWrite());
}
- CompileAll(class_loader);
+ CompileAllAndMakeExecutable(class_loader);
std::unordered_set<std::string> m = GetExpectedMethodsForClass("Main");
std::unordered_set<std::string> s = GetExpectedMethodsForClass("Second");
@@ -310,7 +308,7 @@ TEST_F(CompilerDriverVerifyTest, VerifyCompilation) {
}
ASSERT_NE(class_loader, nullptr);
- CompileAll(class_loader);
+ CompileAllAndMakeExecutable(class_loader);
CheckVerifiedClass(class_loader, "LMain;");
CheckVerifiedClass(class_loader, "LSecond;");
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 8cc6cf10f0..8d1ae3d524 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -24,9 +24,14 @@
#include "arch/instruction_set_features.h"
#include "base/runtime_debug.h"
#include "base/variant_map.h"
+#include "class_linker.h"
#include "cmdline_parser.h"
#include "compiler_options_map-inl.h"
+#include "dex/dex_file-inl.h"
+#include "dex/verification_results.h"
+#include "dex/verified_method.h"
#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
#include "simple_compiler_options_map.h"
namespace art {
@@ -44,9 +49,10 @@ CompilerOptions::CompilerOptions()
no_inline_from_(),
dex_files_for_oat_file_(),
image_classes_(),
- boot_image_(false),
- core_image_(false),
- app_image_(false),
+ verification_results_(nullptr),
+ image_type_(ImageType::kNone),
+ compiling_with_core_image_(false),
+ baseline_(false),
debuggable_(false),
generate_debug_info_(kDefaultGenerateDebugInfo),
generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
@@ -59,6 +65,7 @@ CompilerOptions::CompilerOptions()
dump_pass_timings_(false),
dump_stats_(false),
top_k_profile_threshold_(kDefaultTopKProfileThreshold),
+ profile_compilation_info_(nullptr),
verbose_methods_(),
abort_on_hard_verifier_failure_(false),
abort_on_soft_verifier_failure_(false),
@@ -68,6 +75,9 @@ CompilerOptions::CompilerOptions()
force_determinism_(false),
deduplicate_code_(true),
count_hotness_in_compiled_code_(false),
+ resolve_startup_const_strings_(false),
+ check_profiled_methods_(ProfileMethodsCheck::kNone),
+ max_image_block_size_(std::numeric_limits<uint32_t>::max()),
register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault),
passes_to_run_(nullptr) {
}
@@ -137,4 +147,40 @@ bool CompilerOptions::IsImageClass(const char* descriptor) const {
return image_classes_.find(StringPiece(descriptor)) != image_classes_.end();
}
+const VerificationResults* CompilerOptions::GetVerificationResults() const {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ return verification_results_;
+}
+
+const VerifiedMethod* CompilerOptions::GetVerifiedMethod(const DexFile* dex_file,
+ uint32_t method_idx) const {
+ MethodReference ref(dex_file, method_idx);
+ return verification_results_->GetVerifiedMethod(ref);
+}
+
+bool CompilerOptions::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
+ uint16_t class_def_idx,
+ const DexFile& dex_file) const {
+ const VerifiedMethod* verified_method = GetVerifiedMethod(&dex_file, method_idx);
+ if (verified_method != nullptr) {
+ return !verified_method->HasVerificationFailures();
+ }
+
+ // If we can't find verification metadata, check if this is a system class (we trust that system
+ // classes have their methods verified). If it's not, be conservative and assume the method
+ // has not been verified successfully.
+
+ // TODO: When compiling the boot image it should be safe to assume that everything is verified,
+ // even if methods are not found in the verification cache.
+ const char* descriptor = dex_file.GetClassDescriptor(dex_file.GetClassDef(class_def_idx));
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ bool is_system_class = class_linker->FindSystemClass(self, descriptor) != nullptr;
+ if (!is_system_class) {
+ self->ClearException();
+ }
+ return is_system_class;
+}
+
} // namespace art
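
With verification results now reachable from CompilerOptions, a call site that previously asked the driver can go through the options object instead. A small illustrative fragment; the surrounding variable names are assumed:

  const CompilerOptions& options = driver->GetCompilerOptions();
  if (options.IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, *dex_file)) {
    // Verified cleanly (or a trusted system class): safe to rely on verification metadata.
  }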
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 34aceba1c4..bd12bf7dda 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -39,9 +39,23 @@ namespace verifier {
class VerifierDepsTest;
} // namespace verifier
+namespace linker {
+class Arm64RelativePatcherTest;
+} // namespace linker
+
class DexFile;
enum class InstructionSet;
class InstructionSetFeatures;
+class ProfileCompilationInfo;
+class VerificationResults;
+class VerifiedMethod;
+
+// Enum for CheckProfiledMethodsCompiled. Outside CompilerOptions so it can be forward-declared.
+enum class ProfileMethodsCheck : uint8_t {
+ kNone,
+ kLog,
+ kAbort,
+};
class CompilerOptions final {
public:
@@ -57,6 +71,12 @@ class CompilerOptions final {
static const size_t kDefaultInlineMaxCodeUnits = 32;
static constexpr size_t kUnsetInlineMaxCodeUnits = -1;
+ enum class ImageType : uint8_t {
+ kNone, // JIT or AOT app compilation producing only an oat file but no image.
+ kBootImage, // Creating boot image.
+ kAppImage, // Creating app image.
+ };
+
CompilerOptions();
~CompilerOptions();
@@ -190,23 +210,23 @@ class CompilerOptions final {
// Are we compiling a boot image?
bool IsBootImage() const {
- return boot_image_;
+ return image_type_ == ImageType::kBootImage;
}
- // Are we compiling a core image (small boot image only used for ART testing)?
- bool IsCoreImage() const {
- // Ensure that `core_image_` => `boot_image_`.
- DCHECK(!core_image_ || boot_image_);
- return core_image_;
+ bool IsBaseline() const {
+ return baseline_;
}
// Are we compiling an app image?
bool IsAppImage() const {
- return app_image_;
+ return image_type_ == ImageType::kAppImage;
}
- void DisableAppImage() {
- app_image_ = false;
+ // Returns whether we are compiling against a "core" image, which
+  // is an indication that we are running tests. The compiler will use that
+ // information for checking invariants.
+ bool CompilingWithCoreImage() const {
+ return compiling_with_core_image_;
}
// Should the code be compiled as position independent?
@@ -214,6 +234,10 @@ class CompilerOptions final {
return compile_pic_;
}
+ const ProfileCompilationInfo* GetProfileCompilationInfo() const {
+ return profile_compilation_info_;
+ }
+
bool HasVerboseMethods() const {
return !verbose_methods_.empty();
}
@@ -261,6 +285,16 @@ class CompilerOptions final {
bool IsImageClass(const char* descriptor) const;
+ const VerificationResults* GetVerificationResults() const;
+
+ const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
+
+ // Checks if the specified method has been verified without failures. Returns
+ // false if the method is not in the verification results (GetVerificationResults).
+ bool IsMethodVerifiedWithoutFailures(uint32_t method_idx,
+ uint16_t class_def_idx,
+ const DexFile& dex_file) const;
+
bool ParseCompilerOptions(const std::vector<std::string>& options,
bool ignore_unrecognized,
std::string* error_msg);
@@ -309,6 +343,22 @@ class CompilerOptions final {
return count_hotness_in_compiled_code_;
}
+ bool ResolveStartupConstStrings() const {
+ return resolve_startup_const_strings_;
+ }
+
+ ProfileMethodsCheck CheckProfiledMethodsCompiled() const {
+ return check_profiled_methods_;
+ }
+
+ uint32_t MaxImageBlockSize() const {
+ return max_image_block_size_;
+ }
+
+ void SetMaxImageBlockSize(uint32_t size) {
+ max_image_block_size_ = size;
+ }
+
private:
bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
@@ -343,9 +393,12 @@ class CompilerOptions final {
// Must not be empty for real boot image, only for tests pretending to compile boot image.
HashSet<std::string> image_classes_;
- bool boot_image_;
- bool core_image_;
- bool app_image_;
+ // Results of AOT verification.
+ const VerificationResults* verification_results_;
+
+ ImageType image_type_;
+ bool compiling_with_core_image_;
+ bool baseline_;
bool debuggable_;
bool generate_debug_info_;
bool generate_mini_debug_info_;
@@ -361,6 +414,9 @@ class CompilerOptions final {
// When using a profile file only the top K% of the profiled samples will be compiled.
double top_k_profile_threshold_;
+ // Info for profile guided compilation.
+ const ProfileCompilationInfo* profile_compilation_info_;
+
// Vector of methods to have verbose output enabled for.
std::vector<std::string> verbose_methods_;
@@ -387,6 +443,17 @@ class CompilerOptions final {
// won't be atomic for performance reasons, so we accept races, just like in interpreter.
bool count_hotness_in_compiled_code_;
+ // Whether we eagerly resolve all of the const strings that are loaded from startup methods in the
+ // profile.
+ bool resolve_startup_const_strings_;
+
+ // When running profile-guided compilation, check that methods intended to be compiled end
+ // up compiled and are not punted.
+ ProfileMethodsCheck check_profiled_methods_;
+
+ // Maximum solid block size in the generated image.
+ uint32_t max_image_block_size_;
+
RegisterAllocator::Strategy register_allocation_strategy_;
// If not null, specifies optimization passes which will be run instead of defaults.
@@ -402,6 +469,7 @@ class CompilerOptions final {
friend class CommonCompilerTest;
friend class jit::JitCompiler;
friend class verifier::VerifierDepsTest;
+ friend class linker::Arm64RelativePatcherTest;
template <class Base>
friend bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string* error_msg);
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index 32fc887b8e..7e2a64b52b 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -43,9 +43,6 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string
}
options->SetCompilerFilter(compiler_filter);
}
- if (map.Exists(Base::PIC)) {
- options->compile_pic_ = true;
- }
map.AssignIfExists(Base::HugeMethodMaxThreshold, &options->huge_method_threshold_);
map.AssignIfExists(Base::LargeMethodMaxThreshold, &options->large_method_threshold_);
map.AssignIfExists(Base::SmallMethodMaxThreshold, &options->small_method_threshold_);
@@ -58,6 +55,9 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string
if (map.Exists(Base::Debuggable)) {
options->debuggable_ = true;
}
+ if (map.Exists(Base::Baseline)) {
+ options->baseline_ = true;
+ }
map.AssignIfExists(Base::TopKProfileThreshold, &options->top_k_profile_threshold_);
map.AssignIfExists(Base::AbortOnHardVerifierFailure, &options->abort_on_hard_verifier_failure_);
map.AssignIfExists(Base::AbortOnSoftVerifierFailure, &options->abort_on_soft_verifier_failure_);
@@ -80,6 +80,11 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string
if (map.Exists(Base::CountHotnessInCompiledCode)) {
options->count_hotness_in_compiled_code_ = true;
}
+ map.AssignIfExists(Base::ResolveStartupConstStrings, &options->resolve_startup_const_strings_);
+ if (map.Exists(Base::CheckProfiledMethods)) {
+ options->check_profiled_methods_ = *map.Get(Base::CheckProfiledMethods);
+ }
+ map.AssignIfExists(Base::MaxImageBlockSize, &options->max_image_block_size_);
if (map.Exists(Base::DumpTimings)) {
options->dump_timings_ = true;
@@ -106,9 +111,6 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.template WithType<std::string>()
.IntoKey(Map::CompilerFilter)
- .Define("--compile-pic")
- .IntoKey(Map::PIC)
-
.Define("--huge-method-max=_")
.template WithType<unsigned int>()
.IntoKey(Map::HugeMethodMaxThreshold)
@@ -147,6 +149,12 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.Define({"--count-hotness-in-compiled-code"})
.IntoKey(Map::CountHotnessInCompiledCode)
+ .Define({"--check-profiled-methods=_"})
+ .template WithType<ProfileMethodsCheck>()
+ .WithValueMap({{"log", ProfileMethodsCheck::kLog},
+ {"abort", ProfileMethodsCheck::kAbort}})
+ .IntoKey(Map::CheckProfiledMethods)
+
.Define({"--dump-timings"})
.IntoKey(Map::DumpTimings)
@@ -159,6 +167,9 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.Define("--debuggable")
.IntoKey(Map::Debuggable)
+ .Define("--baseline")
+ .IntoKey(Map::Baseline)
+
.Define("--top-k-profile-threshold=_")
.template WithType<double>().WithRange(0.0, 100.0)
.IntoKey(Map::TopKProfileThreshold)
@@ -184,9 +195,18 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.template WithType<std::string>()
.IntoKey(Map::RegisterAllocationStrategy)
+ .Define("--resolve-startup-const-strings=_")
+ .template WithType<bool>()
+ .WithValueMap({{"false", false}, {"true", true}})
+ .IntoKey(Map::ResolveStartupConstStrings)
+
.Define("--verbose-methods=_")
.template WithType<ParseStringList<','>>()
- .IntoKey(Map::VerboseMethods);
+ .IntoKey(Map::VerboseMethods)
+
+ .Define("--max-image-block-size=_")
+ .template WithType<unsigned int>()
+ .IntoKey(Map::MaxImageBlockSize);
}
#pragma GCC diagnostic pop
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 529d43fc72..0a9c873988 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -48,19 +48,23 @@ COMPILER_OPTIONS_KEY (bool, GenerateDebugInfo)
COMPILER_OPTIONS_KEY (bool, GenerateMiniDebugInfo)
COMPILER_OPTIONS_KEY (bool, GenerateBuildID)
COMPILER_OPTIONS_KEY (Unit, Debuggable)
+COMPILER_OPTIONS_KEY (Unit, Baseline)
COMPILER_OPTIONS_KEY (double, TopKProfileThreshold)
COMPILER_OPTIONS_KEY (bool, AbortOnHardVerifierFailure)
COMPILER_OPTIONS_KEY (bool, AbortOnSoftVerifierFailure)
+COMPILER_OPTIONS_KEY (bool, ResolveStartupConstStrings, false)
COMPILER_OPTIONS_KEY (std::string, DumpInitFailures)
COMPILER_OPTIONS_KEY (std::string, DumpCFG)
COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
// TODO: Add type parser.
COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy)
COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods)
-COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true)
+COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true)
COMPILER_OPTIONS_KEY (Unit, CountHotnessInCompiledCode)
+COMPILER_OPTIONS_KEY (ProfileMethodsCheck, CheckProfiledMethods)
COMPILER_OPTIONS_KEY (Unit, DumpTimings)
COMPILER_OPTIONS_KEY (Unit, DumpPassTimings)
COMPILER_OPTIONS_KEY (Unit, DumpStats)
+COMPILER_OPTIONS_KEY (unsigned int, MaxImageBlockSize)
#undef COMPILER_OPTIONS_KEY
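
The new keys above pair with the parser entries added in compiler_options_map-inl.h. A rough example of feeding the new flags through ParseCompilerOptions (flag values are made up for illustration; error handling is minimal):

  CompilerOptions options;
  std::string error_msg;
  std::vector<std::string> args = {
      "--baseline",
      "--resolve-startup-const-strings=true",
      "--check-profiled-methods=log",    // or "abort"
      "--max-image-block-size=1048576",  // arbitrary example value
  };
  if (!options.ParseCompilerOptions(args, /* ignore_unrecognized= */ false, &error_msg)) {
    LOG(ERROR) << error_msg;
  }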
diff --git a/compiler/driver/compiler_options_map.h b/compiler/driver/compiler_options_map.h
index b9bc8b6ea1..af212d66a1 100644
--- a/compiler/driver/compiler_options_map.h
+++ b/compiler/driver/compiler_options_map.h
@@ -25,6 +25,8 @@
namespace art {
+enum class ProfileMethodsCheck : uint8_t;
+
// Defines a type-safe heterogeneous key->value map. This is to be used as the base for
// an extended map.
template <typename Base, template <typename TV> class KeyType>
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index c90c37d54a..0d0f074917 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -16,22 +16,27 @@
#include "dex_compilation_unit.h"
+#include "art_field.h"
#include "base/utils.h"
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/descriptors_names.h"
+#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
+#include "scoped_thread_state_change-inl.h"
namespace art {
DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint16_t class_def_idx,
uint32_t method_idx,
uint32_t access_flags,
const VerifiedMethod* verified_method,
- Handle<mirror::DexCache> dex_cache)
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::Class> compiling_class)
: class_loader_(class_loader),
class_linker_(class_linker),
dex_file_(&dex_file),
@@ -41,7 +46,8 @@ DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
access_flags_(access_flags),
verified_method_(verified_method),
dex_cache_(dex_cache),
- code_item_accessor_(dex_file, code_item) {}
+ code_item_accessor_(dex_file, code_item),
+ compiling_class_(compiling_class) {}
const std::string& DexCompilationUnit::GetSymbol() {
if (symbol_.empty()) {
@@ -51,4 +57,32 @@ const std::string& DexCompilationUnit::GetSymbol() {
return symbol_;
}
+bool DexCompilationUnit::RequiresConstructorBarrier() const {
+ // Constructor barriers are applicable only for <init> methods.
+ DCHECK(!IsStatic());
+ DCHECK(IsConstructor());
+
+ // We require a constructor barrier if there are final instance fields.
+ if (GetCompilingClass().GetReference() != nullptr && !GetCompilingClass().IsNull()) {
+ // Decoding class data can be slow, so iterate over fields of the compiling class if resolved.
+ ScopedObjectAccess soa(Thread::Current());
+ ObjPtr<mirror::Class> compiling_class = GetCompilingClass().Get();
+ for (size_t i = 0, size = compiling_class->NumInstanceFields(); i != size; ++i) {
+ ArtField* field = compiling_class->GetInstanceField(i);
+ if (field->IsFinal()) {
+ return true;
+ }
+ }
+ } else {
+ // Iterate over field definitions in the class data.
+ ClassAccessor accessor(*GetDexFile(), GetClassDefIndex());
+ for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
+ if (field.IsFinal()) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
} // namespace art
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index c1ae3c938b..def90fa4e1 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -23,10 +23,10 @@
#include "dex/code_item_accessors.h"
#include "dex/dex_file.h"
#include "handle.h"
-#include "jni.h"
namespace art {
namespace mirror {
+class Class;
class ClassLoader;
class DexCache;
} // namespace mirror
@@ -38,12 +38,13 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
ClassLinker* class_linker,
const DexFile& dex_file,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
uint16_t class_def_idx,
uint32_t method_idx,
uint32_t access_flags,
const VerifiedMethod* verified_method,
- Handle<mirror::DexCache> dex_cache);
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::Class> compiling_class = Handle<mirror::Class>());
Handle<mirror::ClassLoader> GetClassLoader() const {
return class_loader_;
@@ -65,17 +66,17 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
return dex_method_idx_;
}
- const DexFile::CodeItem* GetCodeItem() const {
+ const dex::CodeItem* GetCodeItem() const {
return code_item_;
}
const char* GetShorty() const {
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
return dex_file_->GetMethodShorty(method_id);
}
const char* GetShorty(uint32_t* shorty_len) const {
- const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+ const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
return dex_file_->GetMethodShorty(method_id, shorty_len);
}
@@ -117,6 +118,45 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
return code_item_accessor_;
}
+ Handle<mirror::Class> GetCompilingClass() const {
+ return compiling_class_;
+ }
+
+ // Does this <init> method require a constructor barrier (prior to the return)?
+ // The answer is "yes", if and only if the class has any instance final fields.
+ // (This must not be called for any non-<init> methods; the answer would be "no").
+ //
+ // ---
+ //
+ // JLS 17.5.1 "Semantics of final fields" mandates that all final fields are frozen at the end
+ // of the invoked constructor. The constructor barrier is a conservative implementation means of
+ // enforcing the freezes happen-before the object being constructed is observable by another
+ // thread.
+ //
+ // Note: This question only makes sense for instance constructors;
+ // static constructors (despite possibly having finals) never need
+ // a barrier.
+ //
+ // JLS 12.4.2 "Detailed Initialization Procedure" approximately describes
+ // class initialization as:
+ //
+ // lock(class.lock)
+ // class.state = initializing
+ // unlock(class.lock)
+ //
+ // invoke <clinit>
+ //
+ // lock(class.lock)
+ // class.state = initialized
+ // unlock(class.lock) <-- acts as a release
+ //
+ // The last operation in the above example acts as an atomic release
+ // for any stores in <clinit>, which ends up being stricter
+ // than what a constructor barrier needs.
+ //
+ // See also QuasiAtomic::ThreadFenceForConstructor().
+ bool RequiresConstructorBarrier() const;
+
private:
const Handle<mirror::ClassLoader> class_loader_;
@@ -124,7 +164,7 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
const DexFile* const dex_file_;
- const DexFile::CodeItem* const code_item_;
+ const dex::CodeItem* const code_item_;
const uint16_t class_def_idx_;
const uint32_t dex_method_idx_;
const uint32_t access_flags_;
@@ -134,6 +174,8 @@ class DexCompilationUnit : public DeletableArenaObject<kArenaAllocMisc> {
const CodeItemDataAccessor code_item_accessor_;
+ Handle<mirror::Class> compiling_class_;
+
std::string symbol_;
};
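
A short sketch of how a compilation unit with the new compiling_class handle might be built and queried. All surrounding variables are assumed to exist in the caller; this is not taken from the change itself.

  DexCompilationUnit unit(class_loader,
                          class_linker,
                          dex_file,
                          code_item,
                          class_def_idx,
                          method_idx,
                          access_flags,
                          /* verified_method= */ nullptr,
                          dex_cache,
                          compiling_class);  // optional; defaults to an empty Handle
  if (unit.IsConstructor() && !unit.IsStatic() && unit.RequiresConstructorBarrier()) {
    // The <init> writes final instance fields; the backend must emit the barrier
    // before returning, as described in the comment above.
  }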
diff --git a/compiler/driver/simple_compiler_options_map.h b/compiler/driver/simple_compiler_options_map.h
index 3860da9f66..e7a51a4995 100644
--- a/compiler/driver/simple_compiler_options_map.h
+++ b/compiler/driver/simple_compiler_options_map.h
@@ -50,7 +50,7 @@ using Parser = CmdlineParser<SimpleParseArgumentMap, SimpleParseArgumentMapKey>;
static inline Parser CreateSimpleParser(bool ignore_unrecognized) {
std::unique_ptr<Parser::Builder> parser_builder =
- std::unique_ptr<Parser::Builder>(new Parser::Builder());
+ std::make_unique<Parser::Builder>();
AddCompilerOptionsArgumentParserOptions<SimpleParseArgumentMap>(*parser_builder);
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index fd17364c26..d5ceafeac9 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -50,7 +50,7 @@ class ExceptionTest : public CommonRuntimeTest {
// which always points to the first source statement.
static constexpr const uint32_t kDexPc = 0;
- virtual void SetUp() {
+ void SetUp() override {
CommonRuntimeTest::SetUp();
ScopedObjectAccess soa(Thread::Current());
@@ -135,8 +135,8 @@ TEST_F(ExceptionTest, FindCatchHandler) {
ASSERT_EQ(2u, accessor.TriesSize());
ASSERT_NE(0u, accessor.InsnsSizeInCodeUnits());
- const DexFile::TryItem& t0 = accessor.TryItems().begin()[0];
- const DexFile::TryItem& t1 = accessor.TryItems().begin()[1];
+ const dex::TryItem& t0 = accessor.TryItems().begin()[0];
+ const dex::TryItem& t1 = accessor.TryItems().begin()[1];
EXPECT_LE(t0.start_addr_, t1.start_addr_);
{
CatchHandlerIterator iter(accessor, 4 /* Dex PC in the first try block */);
@@ -187,14 +187,14 @@ TEST_F(ExceptionTest, StackTraceElement) {
}
fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
- method_g_, kDexPc, /* is_catch_handler */ false)); // return pc
+ method_g_, kDexPc, /* is_for_catch_handler= */ false)); // return pc
// Create/push fake 16byte stack frame for method g
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
fake_stack.push_back(0);
fake_stack.push_back(0);
fake_stack.push_back(method_g_->GetOatQuickMethodHeader(0)->ToNativeQuickPc(
- method_g_, kDexPc, /* is_catch_handler */ false)); // return pc
+ method_g_, kDexPc, /* is_for_catch_handler= */ false)); // return pc
// Create/push fake 16byte stack frame for method f
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 3fc559e13b..4d7ae9bd1b 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -26,7 +26,6 @@
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
-#include "base/unix_file/fd_file.h"
#include "debug/elf_debug_writer.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -34,11 +33,6 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_logger.h"
-#include "oat_file-inl.h"
-#include "oat_quick_method_header.h"
-#include "object_lock.h"
-#include "optimizing/register_allocator.h"
-#include "thread_list.h"
namespace art {
namespace jit {
@@ -47,54 +41,16 @@ JitCompiler* JitCompiler::Create() {
return new JitCompiler();
}
-extern "C" void* jit_load(bool* generate_debug_info) {
- VLOG(jit) << "loading jit compiler";
- auto* const jit_compiler = JitCompiler::Create();
- CHECK(jit_compiler != nullptr);
- *generate_debug_info = jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
- VLOG(jit) << "Done loading jit compiler";
- return jit_compiler;
-}
-
-extern "C" void jit_unload(void* handle) {
- DCHECK(handle != nullptr);
- delete reinterpret_cast<JitCompiler*>(handle);
-}
-
-extern "C" bool jit_compile_method(
- void* handle, ArtMethod* method, Thread* self, bool osr)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
- DCHECK(jit_compiler != nullptr);
- return jit_compiler->CompileMethod(self, method, osr);
-}
-
-extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
- DCHECK(jit_compiler != nullptr);
- const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
- if (compiler_options.GetGenerateDebugInfo()) {
- const ArrayRef<mirror::Class*> types_array(types, count);
- std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
- kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
- MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
- // We never free debug info for types, so we don't need to provide a handle
- // (which would have been otherwise used as identifier to remove it later).
- AddNativeDebugInfoForJit(nullptr /* handle */, elf_file);
- }
-}
-
-JitCompiler::JitCompiler() {
- compiler_options_.reset(new CompilerOptions());
+void JitCompiler::ParseCompilerOptions() {
  // Special case max code units for inlining, whose default is "unset" (implicitly
// meaning no limit). Do this before parsing the actual passed options.
compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
+ Runtime* runtime = Runtime::Current();
{
std::string error_msg;
- if (!compiler_options_->ParseCompilerOptions(Runtime::Current()->GetCompilerOptions(),
- true /* ignore_unrecognized */,
- &error_msg)) {
+ if (!compiler_options_->ParseCompilerOptions(runtime->GetCompilerOptions(),
+ /*ignore_unrecognized=*/ true,
+ &error_msg)) {
LOG(FATAL) << error_msg;
UNREACHABLE();
}
@@ -102,8 +58,11 @@ JitCompiler::JitCompiler() {
// JIT is never PIC, no matter what the runtime compiler options specify.
compiler_options_->SetNonPic();
- // Set debuggability based on the runtime value.
- compiler_options_->SetDebuggable(Runtime::Current()->IsJavaDebuggable());
+ // If the options don't provide whether we generate debuggable code, set
+ // debuggability based on the runtime value.
+ if (!compiler_options_->GetDebuggable()) {
+ compiler_options_->SetDebuggable(runtime->IsJavaDebuggable());
+ }
const InstructionSet instruction_set = compiler_options_->GetInstructionSet();
if (kRuntimeISA == InstructionSet::kArm) {
@@ -112,7 +71,7 @@ JitCompiler::JitCompiler() {
DCHECK_EQ(instruction_set, kRuntimeISA);
}
std::unique_ptr<const InstructionSetFeatures> instruction_set_features;
- for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+ for (const StringPiece option : runtime->GetCompilerOptions()) {
VLOG(compiler) << "JIT compiler option " << option;
std::string error_msg;
if (option.starts_with("--instruction-set-variant=")) {
@@ -140,29 +99,86 @@ JitCompiler::JitCompiler() {
}
}
}
+
if (instruction_set_features == nullptr) {
+ // '--instruction-set-features/--instruction-set-variant' were not used.
+ // Use build-time defined features.
instruction_set_features = InstructionSetFeatures::FromCppDefines();
}
compiler_options_->instruction_set_features_ = std::move(instruction_set_features);
+ compiler_options_->compiling_with_core_image_ =
+ CompilerDriver::IsCoreImageFilename(runtime->GetImageLocation());
+
+ if (compiler_options_->GetGenerateDebugInfo()) {
+ jit_logger_.reset(new JitLogger());
+ jit_logger_->OpenLog();
+ }
+}
+
+extern "C" void* jit_load() {
+ VLOG(jit) << "Create jit compiler";
+ auto* const jit_compiler = JitCompiler::Create();
+ CHECK(jit_compiler != nullptr);
+ VLOG(jit) << "Done creating jit compiler";
+ return jit_compiler;
+}
+
+extern "C" void jit_unload(void* handle) {
+ DCHECK(handle != nullptr);
+ delete reinterpret_cast<JitCompiler*>(handle);
+}
+
+extern "C" bool jit_compile_method(
+ void* handle, ArtMethod* method, Thread* self, bool baseline, bool osr)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ return jit_compiler->CompileMethod(self, method, baseline, osr);
+}
+
+extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
+ if (compiler_options.GetGenerateDebugInfo()) {
+ const ArrayRef<mirror::Class*> types_array(types, count);
+ std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
+ kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
+ // We never free debug info for types, so we don't need to provide a handle
+ // (which would have been otherwise used as identifier to remove it later).
+ AddNativeDebugInfoForJit(Thread::Current(),
+ /*code_ptr=*/ nullptr,
+ elf_file,
+ debug::PackElfFileForJIT,
+ compiler_options.GetInstructionSet(),
+ compiler_options.GetInstructionSetFeatures());
+ }
+}
+
+extern "C" void jit_update_options(void* handle) {
+ JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ jit_compiler->ParseCompilerOptions();
+}
+
+extern "C" bool jit_generate_debug_info(void* handle) {
+ JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ return jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
+}
+
+JitCompiler::JitCompiler() {
+ compiler_options_.reset(new CompilerOptions());
+ ParseCompilerOptions();
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(),
- /* verification_results */ nullptr,
Compiler::kOptimizing,
- /* image_classes */ nullptr,
- /* thread_count */ 1,
- /* swap_fd */ -1,
- /* profile_compilation_info */ nullptr));
+ /* thread_count= */ 1,
+ /* swap_fd= */ -1));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
-
- size_t thread_count = compiler_driver_->GetThreadCount();
- if (compiler_options_->GetGenerateDebugInfo()) {
- DCHECK_EQ(thread_count, 1u)
- << "Generating debug info only works with one compiler thread";
- jit_logger_.reset(new JitLogger());
- jit_logger_->OpenLog();
- }
}
JitCompiler::~JitCompiler() {
@@ -171,7 +187,7 @@ JitCompiler::~JitCompiler() {
}
}
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr) {
SCOPED_TRACE << "JIT compiling " << method->PrettyMethod();
DCHECK(!method->IsProxyMethod());
@@ -188,7 +204,7 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
TimingLogger::ScopedTiming t2("Compiling", &logger);
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
success = compiler_driver_->GetCompiler()->JitCompile(
- self, code_cache, method, osr, jit_logger_.get());
+ self, code_cache, method, baseline, osr, jit_logger_.get());
}
// Trim maps to reduce memory usage.
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 5840fece2e..29d2761348 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,16 +37,19 @@ class JitCompiler {
virtual ~JitCompiler();
// Compilation entrypoint. Returns whether the compilation succeeded.
- bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
+ bool CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
const CompilerOptions& GetCompilerOptions() const {
return *compiler_options_.get();
}
+
CompilerDriver* GetCompilerDriver() const {
return compiler_driver_.get();
}
+ void ParseCompilerOptions();
+
private:
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<CompilerDriver> compiler_driver_;
@@ -54,11 +57,6 @@ class JitCompiler {
JitCompiler();
- // This is in the compiler since the runtime doesn't have access to the compiled method
- // structures.
- bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
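
The runtime loads these entry points dynamically. A hypothetical loader-side sketch, only to show the reshaped C interface (jit_load() no longer takes a bool* out-parameter, debug-info generation is queried separately, and jit_compile_method() gains a baseline flag). The library name, the helper function, and the error handling are assumptions, not part of this diff:

  #include <dlfcn.h>

  bool JitCompileOnce(ArtMethod* method, Thread* self) {
    void* plugin = dlopen("libart-compiler.so", RTLD_NOW);
    if (plugin == nullptr) {
      return false;
    }
    auto* load = reinterpret_cast<void* (*)()>(dlsym(plugin, "jit_load"));
    auto* generate_debug_info =
        reinterpret_cast<bool (*)(void*)>(dlsym(plugin, "jit_generate_debug_info"));
    auto* compile = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*, bool, bool)>(
        dlsym(plugin, "jit_compile_method"));

    void* handle = load();
    bool with_debug_info = generate_debug_info(handle);  // replaces the old out-parameter
    (void) with_debug_info;
    return compile(handle, method, self, /* baseline= */ false, /* osr= */ false);
  }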
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index 920a3a8da6..b19a2b8843 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -86,7 +86,7 @@ class JNICFITest : public CFITest {
callee_save_regs, mr_conv->EntrySpills());
jni_asm->IncreaseFrameSize(32);
jni_asm->DecreaseFrameSize(32);
- jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
+ jni_asm->RemoveFrame(frame_size, callee_save_regs, /* may_suspend= */ true);
jni_asm->FinalizeCode();
std::vector<uint8_t> actual_asm(jni_asm->CodeSize());
MemoryRegion code(&actual_asm[0], actual_asm.size());
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 92b9543c27..ce987c1f35 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -659,7 +659,7 @@ void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
std::string reason;
ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
- LoadNativeLibrary(env_, "", class_loader_, &reason))
+ LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
<< reason;
jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24);
@@ -675,7 +675,7 @@ void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() {
std::string reason;
ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
- LoadNativeLibrary(env_, "", class_loader_, &reason))
+ LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason))
<< reason;
jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42);
@@ -1300,15 +1300,15 @@ jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2,
EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2));
- EXPECT_EQ(0x12345678ABCDEF88ll, val1);
- EXPECT_EQ(0x7FEDCBA987654321ll, val2);
+ EXPECT_EQ(0x12345678ABCDEF88LL, val1);
+ EXPECT_EQ(0x7FEDCBA987654321LL, val2);
return 42;
}
void JniCompilerTest::GetTextImpl() {
SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
CURRENT_JNI_WRAPPER(my_gettext));
- jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
+ jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88LL, jobj_,
INT64_C(0x7FEDCBA987654321), jobj_);
EXPECT_EQ(result, 42);
}
@@ -2196,7 +2196,7 @@ void Java_MyClassNatives_normalNative(JNIEnv*, jclass) {
// Methods not annotated with anything are not considered "fast native"
// -- Check that the annotation lookup does not find it.
void JniCompilerTest::NormalNativeImpl() {
- SetUpForTest(/* direct */ true,
+ SetUpForTest(/* direct= */ true,
"normalNative",
"()V",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_normalNative));
@@ -2218,7 +2218,7 @@ void Java_MyClassNatives_fastNative(JNIEnv*, jclass) {
}
void JniCompilerTest::FastNativeImpl() {
- SetUpForTest(/* direct */ true,
+ SetUpForTest(/* direct= */ true,
"fastNative",
"()V",
CURRENT_JNI_WRAPPER(Java_MyClassNatives_fastNative));
@@ -2241,7 +2241,7 @@ void Java_MyClassNatives_criticalNative() {
}
void JniCompilerTest::CriticalNativeImpl() {
- SetUpForTest(/* direct */ true,
+ SetUpForTest(/* direct= */ true,
// Important: Don't change the "current jni" yet to avoid a method name suffix.
"criticalNative",
"()V",
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 54f193b551..42a4603571 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -18,6 +18,7 @@
#include <android-base/logging.h>
+#include "arch/instruction_set.h"
#include "base/macros.h"
#include "handle_scope-inl.h"
#include "utils/arm/managed_register_arm.h"
@@ -173,7 +174,7 @@ bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
LOG(FATAL) << "Should not reach here";
- return ManagedRegister::NoRegister();
+ UNREACHABLE();
}
FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
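(The CurrentParamRegister() change above, repeated for arm64, mips, mips64 and x86 below, replaces a dummy return value with UNREACHABLE() after LOG(FATAL). A minimal stand-alone sketch of the idiom, assuming UNREACHABLE() is a wrapper around __builtin_unreachable() as in ART's base macros:)
// Illustrative sketch, not ART code.
#include <cstdlib>
#define UNREACHABLE() __builtin_unreachable()
int MustNotBeCalled() {
  // The abort below stands in for LOG(FATAL); it never returns at runtime,
  // but the compiler cannot know that without a hint.
  std::abort();
  UNREACHABLE();  // Marks the path as never falling through, so no dummy return is needed.
}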
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 328ecbbc5c..4a6a754b5f 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -18,6 +18,7 @@
#include <android-base/logging.h>
+#include "arch/instruction_set.h"
#include "handle_scope-inl.h"
#include "utils/arm64/managed_register_arm64.h"
@@ -181,7 +182,7 @@ bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
LOG(FATAL) << "Should not reach here";
- return ManagedRegister::NoRegister();
+ UNREACHABLE();
}
FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index ff814c8a6b..f031b9be82 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -18,6 +18,8 @@
#include <android-base/logging.h>
+#include "arch/instruction_set.h"
+
#ifdef ART_ENABLE_CODEGEN_arm
#include "jni/quick/arm/calling_convention_arm.h"
#endif
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index e256ce647d..77a5d595d7 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -27,6 +27,8 @@
namespace art {
+enum class InstructionSet;
+
// Top-level abstraction for different calling conventions.
class CallingConvention : public DeletableArenaObject<kArenaAllocCallingConvention> {
public:
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 62e8e0264f..bdbf4293f4 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -151,7 +151,7 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
// Don't allow both @FastNative and @CriticalNative. They are mutually exclusive.
if (UNLIKELY(is_fast_native && is_critical_native)) {
LOG(FATAL) << "JniCompile: Method cannot be both @CriticalNative and @FastNative"
- << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+ << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
}
// @CriticalNative - extra checks:
@@ -162,15 +162,15 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
CHECK(is_static)
<< "@CriticalNative functions cannot be virtual since that would"
<< "require passing a reference parameter (this), which is illegal "
- << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+ << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
CHECK(!is_synchronized)
<< "@CriticalNative functions cannot be synchronized since that would"
<< "require passing a (class and/or this) reference parameter, which is illegal "
- << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+ << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
for (size_t i = 0; i < strlen(shorty); ++i) {
CHECK_NE(Primitive::kPrimNot, Primitive::GetType(shorty[i]))
<< "@CriticalNative methods' shorty types must not have illegal references "
- << dex_file.PrettyMethod(method_idx, /* with_signature */ true);
+ << dex_file.PrettyMethod(method_idx, /* with_signature= */ true);
}
}
}
@@ -219,12 +219,6 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
- // Offsets into data structures
- // TODO: if cross compiling these offsets are for the host not the target
- const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
- const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
- const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
-
// 1. Build the frame saving all callee saves, Method*, and PC return address.
const size_t frame_size(main_jni_conv->FrameSize()); // Excludes outgoing args.
ArrayRef<const ManagedRegister> callee_save_regs = main_jni_conv->CalleeSaveRegisters();
@@ -638,7 +632,7 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
__ DecreaseFrameSize(current_out_arg_size);
// 15. Process pending exceptions from JNI call or monitor exit.
- __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust */);
+ __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust= */);
// 16. Remove activation - need to restore callee save registers since the GC may have changed
// them.
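(The @CriticalNative checks above reduce to: the method is static, not synchronized, and its shorty contains only void/primitive type characters. A minimal stand-alone sketch of the shorty rule; the helper name is made up for illustration:)
#include <cstddef>
// In a dex shorty, 'L' is the only character that denotes a reference type;
// the remaining characters (V, Z, B, S, C, I, J, F, D) are void or primitives.
bool ShortyIsCriticalNativeCompatible(const char* shorty) {
  for (size_t i = 0; shorty[i] != '\0'; ++i) {
    if (shorty[i] == 'L') {
      return false;  // Reference parameter or return type: not allowed for @CriticalNative.
    }
  }
  return true;
}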
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 5ec1addcb9..c69854d19a 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -18,6 +18,7 @@
#include <android-base/logging.h>
+#include "arch/instruction_set.h"
#include "handle_scope-inl.h"
#include "utils/mips/managed_register_mips.h"
@@ -124,7 +125,7 @@ bool MipsManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
ManagedRegister MipsManagedRuntimeCallingConvention::CurrentParamRegister() {
LOG(FATAL) << "Should not reach here";
- return ManagedRegister::NoRegister();
+ UNREACHABLE();
}
FrameOffset MipsManagedRuntimeCallingConvention::CurrentParamStackOffset() {
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 165fc6056e..8b395a0300 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -87,7 +87,7 @@ class MipsJniCallingConvention final : public JniCallingConvention {
private:
// Padding to ensure longs and doubles are not split in o32.
size_t padding_;
- size_t use_fp_arg_registers_;
+ bool use_fp_arg_registers_;
DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention);
};
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index a7012aefa8..2c297b3ce3 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -18,6 +18,7 @@
#include <android-base/logging.h>
+#include "arch/instruction_set.h"
#include "handle_scope-inl.h"
#include "utils/mips64/managed_register_mips64.h"
@@ -109,7 +110,7 @@ bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
LOG(FATAL) << "Should not reach here";
- return ManagedRegister::NoRegister();
+ UNREACHABLE();
}
FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index ad58e3820d..1f255e2bbd 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -18,6 +18,7 @@
#include <android-base/logging.h>
+#include "arch/instruction_set.h"
#include "handle_scope-inl.h"
#include "utils/x86/managed_register_x86.h"
@@ -257,7 +258,7 @@ bool X86JniCallingConvention::IsCurrentParamOnStack() {
ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
LOG(FATAL) << "Should not reach here";
- return ManagedRegister::NoRegister();
+ UNREACHABLE();
}
FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index e5e96d01fc..9e77d6b36c 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -18,6 +18,7 @@
#include <android-base/logging.h>
+#include "arch/instruction_set.h"
#include "base/bit_utils.h"
#include "handle_scope-inl.h"
#include "utils/x86_64/managed_register_x86_64.h"
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 81ecc175b5..6acce10fdf 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_LINKER_ELF_BUILDER_H_
#include <vector>
+#include <deque>
#include "arch/instruction_set.h"
#include "arch/mips/instruction_set_features_mips.h"
@@ -281,10 +282,10 @@ class ElfBuilder final {
name,
SHT_STRTAB,
flags,
- /* link */ nullptr,
- /* info */ 0,
+ /* link= */ nullptr,
+ /* info= */ 0,
align,
- /* entsize */ 0) { }
+ /* entsize= */ 0) { }
Elf_Word Add(const std::string& name) {
if (CachedSection::GetCacheSize() == 0u) {
@@ -305,10 +306,10 @@ class ElfBuilder final {
name,
SHT_STRTAB,
flags,
- /* link */ nullptr,
- /* info */ 0,
+ /* link= */ nullptr,
+ /* info= */ 0,
align,
- /* entsize */ 0) {
+ /* entsize= */ 0) {
Reset();
}
@@ -350,64 +351,56 @@ class ElfBuilder final {
type,
flags,
strtab,
- /* info */ 1,
+ /* info= */ 1,
sizeof(Elf_Off),
sizeof(Elf_Sym)) {
syms_.push_back(Elf_Sym()); // The symbol table always has to start with NULL symbol.
}
// Buffer symbol for this section. It will be written later.
- // If the symbol's section is null, it will be considered absolute (SHN_ABS).
- // (we use this in JIT to reference code which is stored outside the debug ELF file)
void Add(Elf_Word name,
const Section* section,
Elf_Addr addr,
Elf_Word size,
uint8_t binding,
uint8_t type) {
- Elf_Word section_index;
- if (section != nullptr) {
- DCHECK_LE(section->GetAddress(), addr);
- DCHECK_LE(addr, section->GetAddress() + section->header_.sh_size);
- section_index = section->GetSectionIndex();
- } else {
- section_index = static_cast<Elf_Word>(SHN_ABS);
- }
- Add(name, section_index, addr, size, binding, type);
- }
-
- // Buffer symbol for this section. It will be written later.
- void Add(Elf_Word name,
- Elf_Word section_index,
- Elf_Addr addr,
- Elf_Word size,
- uint8_t binding,
- uint8_t type) {
Elf_Sym sym = Elf_Sym();
sym.st_name = name;
sym.st_value = addr;
sym.st_size = size;
sym.st_other = 0;
- sym.st_shndx = section_index;
sym.st_info = (binding << 4) + (type & 0xf);
- syms_.push_back(sym);
+ Add(sym, section);
+ }
+
+ // Buffer symbol for this section. It will be written later.
+ void Add(Elf_Sym sym, const Section* section) {
+ DCHECK(section != nullptr);
+ DCHECK_LE(section->GetAddress(), sym.st_value);
+ DCHECK_LE(sym.st_value, section->GetAddress() + section->header_.sh_size);
+ sym.st_shndx = section->GetSectionIndex();
// The sh_info field must be set to the index one past the last local symbol.
- if (binding == STB_LOCAL) {
- this->header_.sh_info = syms_.size();
+ if (sym.getBinding() == STB_LOCAL) {
+ DCHECK_EQ(syms_.back().getBinding(), STB_LOCAL);
+ this->header_.sh_info = syms_.size() + 1;
}
+
+ syms_.push_back(sym);
}
Elf_Word GetCacheSize() { return syms_.size() * sizeof(Elf_Sym); }
void WriteCachedSection() {
this->Start();
- this->WriteFully(syms_.data(), syms_.size() * sizeof(Elf_Sym));
+ for (; !syms_.empty(); syms_.pop_front()) {
+ this->WriteFully(&syms_.front(), sizeof(Elf_Sym));
+ }
this->End();
}
private:
- std::vector<Elf_Sym> syms_; // Buffered/cached content of the whole section.
+ std::deque<Elf_Sym> syms_; // Buffered/cached content of the whole section.
};
class AbiflagsSection final : public Section {
@@ -775,7 +768,7 @@ class ElfBuilder final {
// The runtime does not care about the size of this symbol (it uses the "lastword" symbol).
// We use size 0 (meaning "unknown size" in ELF) to prevent overlap with the debug symbols.
Elf_Word oatexec = dynstr_.Add("oatexec");
- dynsym_.Add(oatexec, &text_, text_.GetAddress(), /* size */ 0, STB_GLOBAL, STT_OBJECT);
+ dynsym_.Add(oatexec, &text_, text_.GetAddress(), /* size= */ 0, STB_GLOBAL, STT_OBJECT);
Elf_Word oatlastword = dynstr_.Add("oatlastword");
Elf_Word oatlastword_address = text_.GetAddress() + text_size - 4;
dynsym_.Add(oatlastword, &text_, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT);
@@ -831,7 +824,7 @@ class ElfBuilder final {
}
if (dex_size != 0u) {
Elf_Word oatdex = dynstr_.Add("oatdex");
- dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), /* size */ 0, STB_GLOBAL, STT_OBJECT);
+ dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), /* size= */ 0, STB_GLOBAL, STT_OBJECT);
Elf_Word oatdexlastword = dynstr_.Add("oatdexlastword");
Elf_Word oatdexlastword_address = dex_.GetAddress() + dex_size - 4;
dynsym_.Add(oatdexlastword, &dex_, oatdexlastword_address, 4, STB_GLOBAL, STT_OBJECT);
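(The reworked SymbolSection::Add() above maintains the ELF rule that a symbol table's sh_info holds the index one past the last STB_LOCAL symbol, which in turn implies that all local symbols precede all global ones. A small self-contained sketch of that invariant, not ART code:)
#include <cassert>
#include <vector>
struct Sym { unsigned binding; };   // stand-in for Elf_Sym
constexpr unsigned STB_LOCAL = 0;
constexpr unsigned STB_GLOBAL = 1;
unsigned ComputeShInfo(const std::vector<Sym>& syms) {
  unsigned sh_info = 0;             // index one past the last local symbol
  for (size_t i = 0; i < syms.size(); ++i) {
    if (syms[i].binding == STB_LOCAL) {
      assert(sh_info == i);         // locals must not follow globals
      sh_info = static_cast<unsigned>(i) + 1;
    }
  }
  return sh_info;
}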
diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h
index 5e1615fbd2..f9e3930f56 100644
--- a/compiler/linker/linker_patch.h
+++ b/compiler/linker/linker_patch.h
@@ -58,7 +58,7 @@ class LinkerPatch {
static LinkerPatch IntrinsicReferencePatch(size_t literal_offset,
uint32_t pc_insn_offset,
uint32_t intrinsic_data) {
- LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file */ nullptr);
+ LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file= */ nullptr);
patch.intrinsic_data_ = intrinsic_data;
patch.pc_insn_offset_ = pc_insn_offset;
return patch;
@@ -67,7 +67,7 @@ class LinkerPatch {
static LinkerPatch DataBimgRelRoPatch(size_t literal_offset,
uint32_t pc_insn_offset,
uint32_t boot_image_offset) {
- LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file */ nullptr);
+ LinkerPatch patch(literal_offset, Type::kDataBimgRelRo, /* target_dex_file= */ nullptr);
patch.boot_image_offset_ = boot_image_offset;
patch.pc_insn_offset_ = pc_insn_offset;
return patch;
@@ -144,7 +144,9 @@ class LinkerPatch {
static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset,
uint32_t custom_value1 = 0u,
uint32_t custom_value2 = 0u) {
- LinkerPatch patch(literal_offset, Type::kBakerReadBarrierBranch, /* target_dex_file */ nullptr);
+ LinkerPatch patch(literal_offset,
+ Type::kBakerReadBarrierBranch,
+ /* target_dex_file= */ nullptr);
patch.baker_custom_value1_ = custom_value1;
patch.baker_custom_value2_ = custom_value2;
return patch;
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index d9df23fd47..a5f78cafe0 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -68,7 +68,7 @@ bool HBasicBlockBuilder::CreateBranchTargets() {
// places where the program might fall through into/out of a block and
// where TryBoundary instructions will be inserted later. Other edges which
// enter/exit the try blocks are a result of branches/switches.
- for (const DexFile::TryItem& try_item : code_item_accessor_.TryItems()) {
+ for (const dex::TryItem& try_item : code_item_accessor_.TryItems()) {
uint32_t dex_pc_start = try_item.start_addr_;
uint32_t dex_pc_end = dex_pc_start + try_item.insn_count_;
MaybeCreateBlockAt(dex_pc_start);
@@ -222,9 +222,9 @@ void HBasicBlockBuilder::ConnectBasicBlocks() {
}
// Returns the TryItem stored for `block` or nullptr if there is no info for it.
-static const DexFile::TryItem* GetTryItem(
+static const dex::TryItem* GetTryItem(
HBasicBlock* block,
- const ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*>& try_block_info) {
+ const ScopedArenaSafeMap<uint32_t, const dex::TryItem*>& try_block_info) {
auto iterator = try_block_info.find(block->GetBlockId());
return (iterator == try_block_info.end()) ? nullptr : iterator->second;
}
@@ -235,7 +235,7 @@ static const DexFile::TryItem* GetTryItem(
// for a handler.
static void LinkToCatchBlocks(HTryBoundary* try_boundary,
const CodeItemDataAccessor& accessor,
- const DexFile::TryItem* try_item,
+ const dex::TryItem* try_item,
const ScopedArenaSafeMap<uint32_t, HBasicBlock*>& catch_blocks) {
for (CatchHandlerIterator it(accessor.GetCatchHandlerData(try_item->handler_off_));
it.HasNext();
@@ -279,7 +279,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
// Keep a map of all try blocks and their respective TryItems. We do not use
// the block's pointer but rather its id to ensure deterministic iteration.
- ScopedArenaSafeMap<uint32_t, const DexFile::TryItem*> try_block_info(
+ ScopedArenaSafeMap<uint32_t, const dex::TryItem*> try_block_info(
std::less<uint32_t>(), local_allocator_->Adapter(kArenaAllocGraphBuilder));
// Obtain TryItem information for blocks with throwing instructions, and split
@@ -295,7 +295,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
// loop for synchronized blocks.
if (ContainsElement(throwing_blocks_, block)) {
// Try to find a TryItem covering the block.
- const DexFile::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
+ const dex::TryItem* try_item = code_item_accessor_.FindTryItem(block->GetDexPc());
if (try_item != nullptr) {
// Block throwing and in a TryItem. Store the try block information.
try_block_info.Put(block->GetBlockId(), try_item);
@@ -315,8 +315,16 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t address = iterator.GetHandlerAddress();
- if (catch_blocks.find(address) != catch_blocks.end()) {
+ auto existing = catch_blocks.find(address);
+ if (existing != catch_blocks.end()) {
// Catch block already processed.
+ TryCatchInformation* info = existing->second->GetTryCatchInformation();
+ if (iterator.GetHandlerTypeIndex() != info->GetCatchTypeIndex()) {
+ // The handler is for multiple types. We could record all the types, but
+ // doing class resolution here isn't ideal, and it's unclear whether wasting
+ // the space in TryCatchInformation is worth it.
+ info->SetInvalidTypeIndex();
+ }
continue;
}
@@ -337,7 +345,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
catch_blocks.Put(address, catch_block);
catch_block->SetTryCatchInformation(
- new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
+ new (allocator_) TryCatchInformation(iterator.GetHandlerTypeIndex(), *dex_file_));
}
handlers_ptr = iterator.EndDataPointer();
}
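(A single handler address can be listed under several catch type indices, typically when a Java multi-catch clause such as catch (IOException | RuntimeException e) is compiled into one shared handler block. In that case the single type index stored in TryCatchInformation is ambiguous, which is why the code above marks it invalid instead of picking one of the types; this reading of the change is an interpretation of the diff, not taken from the commit message.)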
@@ -348,7 +356,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
// that all predecessors are relinked to. This preserves loop headers (b/23895756).
for (const auto& entry : try_block_info) {
uint32_t block_id = entry.first;
- const DexFile::TryItem* try_item = entry.second;
+ const dex::TryItem* try_item = entry.second;
HBasicBlock* try_block = graph_->GetBlocks()[block_id];
for (HBasicBlock* predecessor : try_block->GetPredecessors()) {
if (GetTryItem(predecessor, try_block_info) != try_item) {
@@ -367,7 +375,7 @@ void HBasicBlockBuilder::InsertTryBoundaryBlocks() {
// the successor is not in the same TryItem.
for (const auto& entry : try_block_info) {
uint32_t block_id = entry.first;
- const DexFile::TryItem* try_item = entry.second;
+ const dex::TryItem* try_item = entry.second;
HBasicBlock* try_block = graph_->GetBlocks()[block_id];
// NOTE: Do not use iterators because SplitEdge would invalidate them.
for (size_t i = 0, e = try_block->GetSuccessors().size(); i < e; ++i) {
@@ -415,7 +423,7 @@ void HBasicBlockBuilder::BuildIntrinsic() {
// Create blocks.
HBasicBlock* entry_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
HBasicBlock* exit_block = new (allocator_) HBasicBlock(graph_, kNoDexPc);
- HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc */ kNoDexPc, /* store_dex_pc */ 0u);
+ HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc= */ kNoDexPc, /* store_dex_pc= */ 0u);
// Add blocks to the graph.
graph_->AddBlock(entry_block);
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 1c3660c0a7..e35d50220e 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -845,8 +845,10 @@ class BCEVisitor : public HGraphVisitor {
// make one more attempt to get a constant in the array range.
ValueRange* existing_range = LookupValueRange(array_length, block);
if (existing_range != nullptr &&
- existing_range->IsConstantValueRange()) {
- ValueRange constant_array_range(&allocator_, lower, existing_range->GetLower());
+ existing_range->IsConstantValueRange() &&
+ existing_range->GetLower().GetConstant() > 0) {
+ ValueBound constant_upper(nullptr, existing_range->GetLower().GetConstant() - 1);
+ ValueRange constant_array_range(&allocator_, lower, constant_upper);
if (index_range->FitsIn(&constant_array_range)) {
ReplaceInstruction(bounds_check, index);
return;
@@ -1634,7 +1636,7 @@ class BCEVisitor : public HGraphVisitor {
HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant());
- InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);
+ InsertDeoptInLoop(loop, block, cond, /* is_null_check= */ true);
ReplaceInstruction(check, array);
return true;
}
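(Worked example of the tightened range above: if the array length's value range has a constant lower bound of 8, only indices 0 through 7 are provably in bounds, so the comparison range is built as [lower, 7], i.e. the constant minus one, and only when that constant is positive; an index range that fits inside it allows the HBoundsCheck to be removed. Previously the constant itself was used as the upper bound, which would also admit an index equal to the length.)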
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 7c29df877a..5927d681b2 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,7 +43,7 @@ class BoundsCheckEliminationTest : public OptimizingUnitTest {
void RunBCE() {
graph_->BuildDominatorTree();
- InstructionSimplifier(graph_, /* codegen */ nullptr).Run();
+ InstructionSimplifier(graph_, /* codegen= */ nullptr).Run();
SideEffectsAnalysis side_effects(graph_);
side_effects.Run();
@@ -598,9 +598,10 @@ static HInstruction* BuildSSAGraph3(HGraph* graph,
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
HInstruction* new_array = new (allocator) HNewArray(
- constant_10,
- constant_10,
- 0);
+ /* cls= */ constant_10,
+ /* length= */ constant_10,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
block->AddInstruction(new_array);
block->AddInstruction(new (allocator) HGoto());
@@ -977,7 +978,11 @@ TEST_F(BoundsCheckEliminationTest, ModArrayBoundsElimination) {
graph_->AddBlock(block);
entry->AddSuccessor(block);
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (GetAllocator()) HNewArray(constant_10, constant_10, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(
+ /* cls= */ constant_10,
+ /* length= */ constant_10,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
block->AddInstruction(new_array);
block->AddInstruction(new (GetAllocator()) HGoto());
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a1a5692ef6..64aa1b9358 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -21,6 +21,7 @@
#include "base/bit_vector-inl.h"
#include "base/logging.h"
#include "block_builder.h"
+#include "code_generator.h"
#include "data_type-inl.h"
#include "dex/verified_method.h"
#include "driver/compiler_options.h"
@@ -40,7 +41,6 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
const CodeItemDebugInfoAccessor& accessor,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
ArrayRef<const uint8_t> interpreter_metadata,
@@ -50,7 +50,6 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
code_item_accessor_(accessor),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
- compiler_driver_(driver),
code_generator_(code_generator),
compilation_stats_(compiler_stats),
interpreter_metadata_(interpreter_metadata),
@@ -67,19 +66,18 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
code_item_accessor_(accessor),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(nullptr),
- compiler_driver_(nullptr),
code_generator_(nullptr),
compilation_stats_(nullptr),
handles_(handles),
return_type_(return_type) {}
bool HGraphBuilder::SkipCompilation(size_t number_of_branches) {
- if (compiler_driver_ == nullptr) {
- // Note that the compiler driver is null when unit testing.
+ if (code_generator_ == nullptr) {
+ // Note that the codegen is null when unit testing.
return false;
}
- const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
+ const CompilerOptions& compiler_options = code_generator_->GetCompilerOptions();
CompilerFilter::Filter compiler_filter = compiler_options.GetCompilerFilter();
if (compiler_filter == CompilerFilter::kEverything) {
return false;
@@ -131,7 +129,6 @@ GraphAnalysisResult HGraphBuilder::BuildGraph() {
return_type_,
dex_compilation_unit_,
outer_compilation_unit_,
- compiler_driver_,
code_generator_,
interpreter_metadata_,
compilation_stats_,
@@ -203,7 +200,6 @@ void HGraphBuilder::BuildIntrinsicGraph(ArtMethod* method) {
return_type_,
dex_compilation_unit_,
outer_compilation_unit_,
- compiler_driver_,
code_generator_,
interpreter_metadata_,
compilation_stats_,
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 5a1914ce08..6152740324 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -22,7 +22,6 @@
#include "dex/code_item_accessors.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file.h"
-#include "driver/compiler_driver.h"
#include "nodes.h"
namespace art {
@@ -38,7 +37,6 @@ class HGraphBuilder : public ValueObject {
const CodeItemDebugInfoAccessor& accessor,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
ArrayRef<const uint8_t> interpreter_metadata,
@@ -70,7 +68,6 @@ class HGraphBuilder : public ValueObject {
// The compilation unit of the enclosing method being compiled.
const DexCompilationUnit* const outer_compilation_unit_;
- CompilerDriver* const compiler_driver_;
CodeGenerator* const code_generator_;
OptimizingCompilerStats* const compilation_stats_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e84896b113..9e2f5cd508 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -197,7 +197,7 @@ class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllo
return GetNumberOfJitStringRoots() + GetNumberOfJitClassRoots();
}
- void EmitJitRoots(Handle<mirror::ObjectArray<mirror::Object>> roots)
+ void EmitJitRoots(/*out*/std::vector<Handle<mirror::Object>>* roots)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -230,29 +230,31 @@ class CodeGenerator::CodeGenerationData : public DeletableArenaObject<kArenaAllo
};
void CodeGenerator::CodeGenerationData::EmitJitRoots(
- Handle<mirror::ObjectArray<mirror::Object>> roots) {
- DCHECK_EQ(static_cast<size_t>(roots->GetLength()), GetNumberOfJitRoots());
+ /*out*/std::vector<Handle<mirror::Object>>* roots) {
+ DCHECK(roots->empty());
+ roots->reserve(GetNumberOfJitRoots());
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
size_t index = 0;
for (auto& entry : jit_string_roots_) {
// Update the `roots` with the string, and replace the address temporarily
// stored to the index in the table.
uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::String>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
+ roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+ DCHECK(roots->back() != nullptr);
+ DCHECK(roots->back()->IsString());
entry.second = index;
// Ensure the string is strongly interned. This is a requirement on how the JIT
// handles strings. b/32995596
- class_linker->GetInternTable()->InternStrong(
- reinterpret_cast<mirror::String*>(roots->Get(index)));
+ class_linker->GetInternTable()->InternStrong(roots->back()->AsString());
++index;
}
for (auto& entry : jit_class_roots_) {
// Update the `roots` with the class, and replace the address temporarily
// stored to the index in the table.
uint64_t address = entry.second;
- roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
- DCHECK(roots->Get(index) != nullptr);
+ roots->emplace_back(reinterpret_cast<StackReference<mirror::Object>*>(address));
+ DCHECK(roots->back() != nullptr);
+ DCHECK(roots->back()->IsClass());
entry.second = index;
++index;
}
@@ -412,7 +414,7 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
// This ensures that we have correct native line mapping for all native instructions.
// It is necessary to make stepping over a statement work. Otherwise, any initial
// instructions (e.g. moves) would be assumed to be the start of next statement.
- MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
+ MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc());
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->HasEnvironment()) {
@@ -985,7 +987,7 @@ static void CheckCovers(uint32_t dex_pc,
// dex branch instructions.
static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
const CodeInfo& code_info,
- const DexFile::CodeItem& code_item) {
+ const dex::CodeItem& code_item) {
if (graph.HasTryCatch()) {
// One can write loops through try/catch, which we do not support for OSR anyway.
return;
@@ -1027,7 +1029,7 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
}
}
-ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const DexFile::CodeItem* code_item) {
+ScopedArenaVector<uint8_t> CodeGenerator::BuildStackMaps(const dex::CodeItem* code_item) {
ScopedArenaVector<uint8_t> stack_map = GetStackMapStream()->Encode();
if (kIsDebugBuild && code_item != nullptr) {
CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map.data()), *code_item);
@@ -1083,7 +1085,7 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
// call). Therefore register_mask contains both callee-save and caller-save
// registers that hold objects. We must remove the spilled caller-save from the
// mask, since they will be overwritten by the callee.
- uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true);
+ uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true);
register_mask &= ~spills;
} else {
// The register mask must be a subset of callee-save registers.
@@ -1124,6 +1126,7 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
if (osr) {
DCHECK_EQ(info->GetSuspendCheck(), instruction);
DCHECK(info->IsIrreducible());
+ DCHECK(environment != nullptr);
if (kIsDebugBuild) {
for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
HInstruction* in_environment = environment->GetInstructionAt(i);
@@ -1161,7 +1164,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
// Ensure that we do not collide with the stack map of the previous instruction.
GenerateNop();
}
- RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info */ true);
+ RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true);
}
}
@@ -1179,8 +1182,8 @@ void CodeGenerator::RecordCatchBlockInfo() {
stack_map_stream->BeginStackMapEntry(dex_pc,
native_pc,
- /* register_mask */ 0,
- /* stack_mask */ nullptr,
+ /* register_mask= */ 0,
+ /* sp_mask= */ nullptr,
StackMap::Kind::Catch);
HInstruction* current_phi = block->GetFirstPhi();
@@ -1552,7 +1555,7 @@ void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* in
void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
@@ -1564,7 +1567,7 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
stack_offset += codegen->SaveCoreRegister(stack_offset, i);
}
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -1576,14 +1579,14 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
}
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -1645,28 +1648,21 @@ void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
}
void CodeGenerator::EmitJitRoots(uint8_t* code,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data) {
+ const uint8_t* roots_data,
+ /*out*/std::vector<Handle<mirror::Object>>* roots) {
code_generation_data_->EmitJitRoots(roots);
EmitJitRootPatches(code, roots_data);
}
-QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass) {
- ScopedObjectAccess soa(Thread::Current());
- if (array_klass == nullptr) {
- // This can only happen for non-primitive arrays, as primitive arrays can always
- // be resolved.
- return kQuickAllocArrayResolved32;
- }
-
- switch (array_klass->GetComponentSize()) {
- case 1: return kQuickAllocArrayResolved8;
- case 2: return kQuickAllocArrayResolved16;
- case 4: return kQuickAllocArrayResolved32;
- case 8: return kQuickAllocArrayResolved64;
+QuickEntrypointEnum CodeGenerator::GetArrayAllocationEntrypoint(HNewArray* new_array) {
+ switch (new_array->GetComponentSizeShift()) {
+ case 0: return kQuickAllocArrayResolved8;
+ case 1: return kQuickAllocArrayResolved16;
+ case 2: return kQuickAllocArrayResolved32;
+ case 3: return kQuickAllocArrayResolved64;
}
LOG(FATAL) << "Unreachable";
- return kQuickAllocArrayResolved;
+ UNREACHABLE();
}
} // namespace art
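(The rewritten GetArrayAllocationEntrypoint() above keys off HNewArray's component size shift, i.e. log2 of the element size, so the switch reads as the following summary; the element-type examples are illustrative:)
// shift 0 -> 1-byte elements (e.g. byte[], boolean[])          -> kQuickAllocArrayResolved8
// shift 1 -> 2-byte elements (e.g. char[], short[])            -> kQuickAllocArrayResolved16
// shift 2 -> 4-byte elements (e.g. int[], float[], references) -> kQuickAllocArrayResolved32
// shift 3 -> 8-byte elements (e.g. long[], double[])           -> kQuickAllocArrayResolved64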
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index e77d621b58..f70ecb612d 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -59,7 +59,6 @@ static constexpr ReadBarrierOption kCompilerReadBarrierOption =
class Assembler;
class CodeGenerator;
-class CompilerDriver;
class CompilerOptions;
class StackMapStream;
class ParallelMoveResolver;
@@ -350,14 +349,14 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
void AddSlowPath(SlowPathCode* slow_path);
- ScopedArenaVector<uint8_t> BuildStackMaps(const DexFile::CodeItem* code_item_for_osr_check);
+ ScopedArenaVector<uint8_t> BuildStackMaps(const dex::CodeItem* code_item_for_osr_check);
size_t GetNumberOfJitRoots() const;
// Fills the `literals` array with literals collected during code generation.
// Also emits literal patches.
void EmitJitRoots(uint8_t* code,
- Handle<mirror::ObjectArray<mirror::Object>> roots,
- const uint8_t* roots_data)
+ const uint8_t* roots_data,
+ /*out*/std::vector<Handle<mirror::Object>>* roots)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLeafMethod() const {
@@ -622,7 +621,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// otherwise return a fall-back info that should be used instead.
virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) = 0;
+ ArtMethod* method) = 0;
// Generate a call to a static or direct method.
virtual void GenerateStaticOrDirectCall(
@@ -636,7 +635,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
virtual void GenerateNop() = 0;
- static QuickEntrypointEnum GetArrayAllocationEntrypoint(Handle<mirror::Class> array_klass);
+ static QuickEntrypointEnum GetArrayAllocationEntrypoint(HNewArray* new_array);
protected:
// Patch info used for recording locations of required linker patches and their targets,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d56f7aaca1..ff99a3eff2 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -168,8 +168,8 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
LocationSummary* locations,
int64_t spill_offset,
bool is_save) {
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills,
codegen->GetNumberOfCoreRegisters(),
fp_spills,
@@ -212,7 +212,7 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
@@ -224,7 +224,7 @@ void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummar
stack_offset += kXRegSizeInBytes;
}
- const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -234,13 +234,13 @@ void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummar
SaveRestoreLiveRegistersHelper(codegen,
locations,
- codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
+ codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ true);
}
void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
SaveRestoreLiveRegistersHelper(codegen,
locations,
- codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
+ codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ false);
}
class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
@@ -885,7 +885,8 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetAllocator(), this),
- assembler_(graph->GetAllocator()),
+ assembler_(graph->GetAllocator(),
+ compiler_options.GetInstructionSetFeatures()->AsArm64InstructionSetFeatures()),
uint32_literals_(std::less<uint32_t>(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
@@ -925,7 +926,7 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
uint32_t encoded_data = entry.first;
vixl::aarch64::Label* slow_path_entry = &entry.second.label;
__ Bind(slow_path_entry);
- CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+ CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
}
// Ensure we emit the literal pool.
@@ -1117,7 +1118,7 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
}
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void CodeGeneratorARM64::GenerateFrameExit() {
@@ -1205,6 +1206,7 @@ void CodeGeneratorARM64::SetupBlockedRegisters() const {
// mr : Runtime reserved.
// ip1 : VIXL core temp.
// ip0 : VIXL core temp.
+ // x18 : Platform register.
//
// Blocked fp registers:
// d31 : VIXL fp temp.
@@ -1213,6 +1215,7 @@ void CodeGeneratorARM64::SetupBlockedRegisters() const {
while (!reserved_core_registers.IsEmpty()) {
blocked_core_registers_[reserved_core_registers.PopLowestIndex().GetCode()] = true;
}
+ blocked_core_registers_[X18] = true;
CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
while (!reserved_fp_registers.IsEmpty()) {
@@ -1885,7 +1888,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
base,
offset,
maybe_temp,
- /* needs_null_check */ true,
+ /* needs_null_check= */ true,
field_info.IsVolatile());
} else {
// General case.
@@ -1894,7 +1897,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
// CodeGeneratorARM64::LoadAcquire call.
// NB: LoadAcquire will record the pc info if needed.
codegen_->LoadAcquire(
- instruction, OutputCPURegister(instruction), field, /* needs_null_check */ true);
+ instruction, OutputCPURegister(instruction), field, /* needs_null_check= */ true);
} else {
// Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -1949,7 +1952,7 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
if (field_info.IsVolatile()) {
codegen_->StoreRelease(
- instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check */ true);
+ instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check= */ true);
} else {
// Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
@@ -2317,9 +2320,10 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
if (offset >= kReferenceLoadMinFarOffset) {
locations->AddTemp(FixedTempLocation());
}
- } else {
+ } else if (!instruction->GetArray()->IsIntermediateAddress()) {
// We need a non-scratch temporary for the array data pointer in
- // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier().
+ // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier() for the case with no
+ // intermediate address.
locations->AddTemp(Location::RequiresRegister());
}
}
@@ -2349,11 +2353,12 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
MacroAssembler* masm = GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
- // The read barrier instrumentation of object ArrayGet instructions
+ // The non-Baker read barrier instrumentation of object ArrayGet instructions
// does not support the HIntermediateAddress instruction.
DCHECK(!((type == DataType::Type::kReference) &&
instruction->GetArray()->IsIntermediateAddress() &&
- kEmitCompilerReadBarrier));
+ kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier));
if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// Object ArrayGet with Baker's read barrier case.
@@ -2361,6 +2366,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
// CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
if (index.IsConstant()) {
+ DCHECK(!instruction->GetArray()->IsIntermediateAddress());
// Array load with a constant index can be treated as a field load.
offset += Int64FromLocation(index) << DataType::SizeShift(type);
Location maybe_temp =
@@ -2370,12 +2376,11 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
obj.W(),
offset,
maybe_temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
} else {
- Register temp = WRegisterFrom(locations->GetTemp(0));
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- out, obj.W(), offset, index, temp, /* needs_null_check */ false);
+ instruction, out, obj.W(), offset, index, /* needs_null_check= */ false);
}
} else {
// General case.
@@ -2424,8 +2429,8 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
if (kIsDebugBuild) {
- HIntermediateAddress* tmp = instruction->GetArray()->AsIntermediateAddress();
- DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset);
+ HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+ DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset);
}
temp = obj;
} else {
@@ -2537,8 +2542,8 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
// input instruction has done it already. See the comment in
// `TryExtractArrayAccessAddress()`.
if (kIsDebugBuild) {
- HIntermediateAddress* tmp = instruction->GetArray()->AsIntermediateAddress();
- DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+ HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+ DCHECK(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
}
temp = array;
} else {
@@ -2920,7 +2925,7 @@ void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperati
int64_t magic;
int shift;
CalculateMagicAndShiftForDivRem(
- imm, type == DataType::Type::kInt64 /* is_long */, &magic, &shift);
+ imm, /* is_long= */ type == DataType::Type::kInt64, &magic, &shift);
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireSameSizeAs(out);
@@ -3042,7 +3047,7 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction
if (!DataType::IsIntegralType(type)) {
LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
- return;
+ UNREACHABLE();
}
if (value.IsConstant()) {
@@ -3111,7 +3116,7 @@ void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* s
}
if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
if (!codegen_->GoesToNextBlock(block, successor)) {
__ B(codegen_->GetLabelOf(successor));
@@ -3261,7 +3266,7 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
if (codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor)) {
false_target = nullptr;
}
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -3280,9 +3285,9 @@ void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCodeARM64* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -3622,7 +3627,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -3654,7 +3659,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -3947,7 +3952,7 @@ void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
@@ -4017,7 +4022,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
@@ -4053,7 +4058,7 @@ static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codege
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
// On ARM64 we support all dispatch types.
return desired_dispatch_info;
}
@@ -4196,7 +4201,7 @@ void LocationsBuilderARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
codegen_->GenerateInvokePolymorphicCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -4205,21 +4210,21 @@ void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
void InstructionCodeGeneratorARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
codegen_->GenerateInvokeCustomCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageIntrinsicPatch(
uint32_t intrinsic_data,
vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
+ /* dex_file= */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
vixl::aarch64::Label* adrp_label) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
+ /* dex_file= */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
}
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch(
@@ -4303,7 +4308,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLitera
ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
- [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
}
vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
@@ -4311,7 +4316,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral
ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
- [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); });
}
void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label,
@@ -4350,7 +4355,7 @@ void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
// Add ADD with its PC-relative type patch.
vixl::aarch64::Label* add_label = NewBootImageIntrinsicPatch(boot_image_reference, adrp_label);
EmitAddPlaceholder(add_label, reg.X(), reg.X());
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
// Add ADRP with its PC-relative .data.bimg.rel.ro patch.
vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_reference);
EmitAdrpPlaceholder(adrp_label, reg.X());
@@ -4508,7 +4513,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
@@ -4521,12 +4526,12 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
@@ -4538,7 +4543,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
DCHECK(!codegen_->IsLeafMethod());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
@@ -4606,7 +4611,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -4628,7 +4633,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
out_loc,
current_method,
ArtMethod::DeclaringClassOffset().Int32Value(),
- /* fixup_label */ nullptr,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
@@ -4691,8 +4696,8 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
codegen_->GenerateGcRootFieldLoad(cls,
out_loc,
out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
+ /* offset= */ 0,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
@@ -4716,7 +4721,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
} else {
__ Bind(slow_path->GetExitLabel());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
}
@@ -4854,7 +4859,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
return;
}
case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -4870,8 +4875,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
codegen_->GenerateGcRootFieldLoad(load,
out_loc,
out.X(),
- /* offset */ 0,
- /* fixup_label */ nullptr,
+ /* offset= */ 0,
+ /* fixup_label= */ nullptr,
kCompilerReadBarrierOption);
return;
}
@@ -4885,7 +4890,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
__ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
@@ -4913,7 +4918,7 @@ void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* ins
} else {
CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitMul(HMul* mul) {
@@ -5004,13 +5009,11 @@ void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -5024,7 +5027,7 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
@@ -5499,7 +5502,7 @@ void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction
return;
}
GenerateSuspendCheck(instruction, nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -5712,8 +5715,8 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister(
out_reg,
offset,
maybe_temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -5753,8 +5756,8 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(
obj_reg,
offset,
maybe_temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -5839,7 +5842,7 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad(
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
}
void CodeGeneratorARM64::GenerateUnsafeCasOldValueMovWithBakerReadBarrier(
@@ -5928,7 +5931,7 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
}
__ bind(&return_address);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
}
void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -5957,11 +5960,11 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
instruction, ref, obj, src, needs_null_check, use_load_acquire);
}
-void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
+void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
+ Location ref,
Register obj,
uint32_t data_offset,
Location index,
- Register temp,
bool needs_null_check) {
DCHECK(kEmitCompilerReadBarrier);
DCHECK(kUseBakerReadBarrier);
@@ -6000,9 +6003,24 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
DCHECK(temps.IsAvailable(ip0));
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
+
+ Register temp;
+ if (instruction->GetArray()->IsIntermediateAddress()) {
+ // We do not need to compute the intermediate address from the array: the
+ // input instruction has done it already. See the comment in
+ // `TryExtractArrayAccessAddress()`.
+ if (kIsDebugBuild) {
+ HIntermediateAddress* interm_addr = instruction->GetArray()->AsIntermediateAddress();
+ DCHECK_EQ(interm_addr->GetOffset()->AsIntConstant()->GetValueAsUint64(), data_offset);
+ }
+ temp = obj;
+ } else {
+ temp = WRegisterFrom(instruction->GetLocations()->GetTemp(0));
+ __ Add(temp.X(), obj.X(), Operand(data_offset));
+ }
+
uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
- __ Add(temp.X(), obj.X(), Operand(data_offset));
{
ExactAssemblyScope guard(GetVIXLAssembler(),
(kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
@@ -6021,7 +6039,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
}
__ bind(&return_address);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1));
}
void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
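With the signature change above, GenerateArrayLoadWithBakerReadBarrier no longer receives a pre-computed temp register; it derives one from the HArrayGet itself. A small stand-alone sketch of that selection, assuming (as the new code does) that an HIntermediateAddress input means obj already holds array + data_offset:

#include <cstdint>

// Stand-alone sketch; vixl::aarch64::Register is the real type in ART, a
// plain register code stands in for it here.
using RegCode = uint32_t;

// If the HArrayGet's array input is an HIntermediateAddress, `obj` already
// holds array + data_offset, so it doubles as the temp and no extra ADD is
// needed; otherwise the temp comes from the LocationSummary and the ADD is
// now emitted inside the helper instead of at every call site.
RegCode PickArrayLoadTemp(bool array_is_intermediate_address,
                          RegCode obj,
                          RegCode temp_from_locations) {
  return array_is_intermediate_address ? obj : temp_from_locations;
}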
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 2e7a20b553..ada5742fc0 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -557,7 +557,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
@@ -694,11 +694,11 @@ class CodeGeneratorARM64 : public CodeGenerator {
bool use_load_acquire);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference array load when Baker's read barriers are used.
- void GenerateArrayLoadWithBakerReadBarrier(Location ref,
+ void GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instruction,
+ Location ref,
vixl::aarch64::Register obj,
uint32_t data_offset,
Location index,
- vixl::aarch64::Register temp,
bool needs_null_check);
// Emit code checking the status of the Marking Register, and
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 3580975c62..8204f1eecb 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -319,7 +319,7 @@ void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen, LocationSumm
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
size_t orig_offset = stack_offset;
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
// If the register holds an object, update the stack mask.
if (locations->RegisterContainsObject(i)) {
@@ -334,7 +334,7 @@ void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen, LocationSumm
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
arm_codegen->GetAssembler()->StoreRegisterList(core_spills, orig_offset);
- uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
orig_offset = stack_offset;
for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -357,7 +357,7 @@ void SlowPathCodeARMVIXL::RestoreLiveRegisters(CodeGenerator* codegen, LocationS
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
size_t orig_offset = stack_offset;
- const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true);
for (uint32_t i : LowToHighBits(core_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
@@ -368,7 +368,7 @@ void SlowPathCodeARMVIXL::RestoreLiveRegisters(CodeGenerator* codegen, LocationS
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
arm_codegen->GetAssembler()->LoadRegisterList(core_spills, orig_offset);
- uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
while (fp_spills != 0u) {
uint32_t begin = CTZ(fp_spills);
uint32_t tmp = fp_spills + (1u << begin);
@@ -1037,26 +1037,26 @@ static uint32_t ComputeSRegisterListMask(const SRegisterList& regs) {
size_t CodeGeneratorARMVIXL::SaveCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
uint32_t reg_id ATTRIBUTE_UNUSED) {
TODO_VIXL32(FATAL);
- return 0;
+ UNREACHABLE();
}
// Restores the register from the stack. Returns the size taken on stack.
size_t CodeGeneratorARMVIXL::RestoreCoreRegister(size_t stack_index ATTRIBUTE_UNUSED,
uint32_t reg_id ATTRIBUTE_UNUSED) {
TODO_VIXL32(FATAL);
- return 0;
+ UNREACHABLE();
}
size_t CodeGeneratorARMVIXL::SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
uint32_t reg_id ATTRIBUTE_UNUSED) {
TODO_VIXL32(FATAL);
- return 0;
+ UNREACHABLE();
}
size_t CodeGeneratorARMVIXL::RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
uint32_t reg_id ATTRIBUTE_UNUSED) {
TODO_VIXL32(FATAL);
- return 0;
+ UNREACHABLE();
}
static void GenerateDataProcInstruction(HInstruction::InstructionKind kind,
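Replacing the dead "return 0;" after a fatal macro with UNREACHABLE() documents that control never continues and avoids inventing a return value. A minimal sketch of the pattern, assuming the fatal macro aborts; FatalTodo is a stand-in, not the ART macro:

#include <cstddef>
#include <cstdlib>

[[noreturn]] void FatalTodo() { std::abort(); }  // stand-in for TODO_VIXL32(FATAL)

std::size_t SaveCoreRegisterStub() {
  FatalTodo();
  __builtin_unreachable();  // roughly what ART's UNREACHABLE() expands to on Clang/GCC
}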
@@ -1539,7 +1539,7 @@ static void GenerateConditionGeneric(HCondition* cond, CodeGeneratorARMVIXL* cod
vixl32::Label done_label;
vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label);
- __ B(condition.second, final_label, /* far_target */ false);
+ __ B(condition.second, final_label, /* is_far_target= */ false);
__ Mov(out, 1);
if (done_label.IsReferenced()) {
@@ -1934,7 +1934,7 @@ void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
uint32_t encoded_data = entry.first;
vixl::aarch32::Label* slow_path_entry = &entry.second.label;
__ Bind(slow_path_entry);
- CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
+ CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr);
}
GetAssembler()->FinalizeCode();
@@ -2159,7 +2159,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag());
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 1);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 1);
}
void CodeGeneratorARMVIXL::GenerateFrameExit() {
@@ -2268,7 +2268,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(DataType::Typ
case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
- break;
+ UNREACHABLE();
}
return Location::NoLocation();
}
@@ -2427,7 +2427,7 @@ void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock*
}
if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 2);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 2);
}
if (!codegen_->GoesToNextBlock(block, successor)) {
__ B(codegen_->GetLabelOf(successor));
@@ -2606,7 +2606,7 @@ void InstructionCodeGeneratorARMVIXL::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
vixl32::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -2625,9 +2625,9 @@ void InstructionCodeGeneratorARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCodeARMVIXL* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARMVIXL>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -2677,6 +2677,18 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
const Location first = locations->InAt(0);
const Location out = locations->Out();
const Location second = locations->InAt(1);
+
+  // In the unlucky case where the output of this instruction overlaps
+  // with an input of an "emitted-at-use-site" condition, and
+  // the output of this instruction is not one of its inputs, we'll
+  // need to fall back to branches instead of conditional ARM instructions.
+ bool output_overlaps_with_condition_inputs =
+ !IsBooleanValueOrMaterializedCondition(condition) &&
+ !out.Equals(first) &&
+ !out.Equals(second) &&
+ (condition->GetLocations()->InAt(0).Equals(out) ||
+ condition->GetLocations()->InAt(1).Equals(out));
+ DCHECK(!output_overlaps_with_condition_inputs || condition->IsCondition());
Location src;
if (condition->IsIntConstant()) {
@@ -2690,7 +2702,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
return;
}
- if (!DataType::IsFloatingPointType(type)) {
+ if (!DataType::IsFloatingPointType(type) && !output_overlaps_with_condition_inputs) {
bool invert = false;
if (out.Equals(second)) {
@@ -2762,6 +2774,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
vixl32::Label* false_target = nullptr;
vixl32::Label* true_target = nullptr;
vixl32::Label select_end;
+ vixl32::Label other_case;
vixl32::Label* const target = codegen_->GetFinalLabel(select, &select_end);
if (out.Equals(second)) {
@@ -2772,12 +2785,21 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) {
src = second;
if (!out.Equals(first)) {
- codegen_->MoveLocation(out, first, type);
+ if (output_overlaps_with_condition_inputs) {
+ false_target = &other_case;
+ } else {
+ codegen_->MoveLocation(out, first, type);
+ }
}
}
- GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target */ false);
+ GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target= */ false);
codegen_->MoveLocation(out, src, type);
+ if (output_overlaps_with_condition_inputs) {
+ __ B(target);
+ __ Bind(&other_case);
+ codegen_->MoveLocation(out, first, type);
+ }
if (select_end.IsReferenced()) {
__ Bind(&select_end);
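The new output_overlaps_with_condition_inputs flag exists because the conditional-move sequence writes one source into the output register before it evaluates the condition; if the output register is also an input of an emitted-at-use-site condition, that first move would clobber the comparison's operand. A plain C++ illustration of the hazard and of the branch-based fallback (illustrative only, not generated code):

// If `out` shared a register with `a` and we wrote the "false" value into it
// before the compare, the compare would read a clobbered operand.
int SelectNaive(int a, int b, int x, int y) {
  int out = x;               // pretend `out` and `a` are the same register
  return (a < b) ? y : out;  // the condition would then see x, not a
}

// The fallback keeps the condition's operands intact by branching first and
// moving afterwards.
int SelectWithBranch(int a, int b, int x, int y) {
  if (a < b) {
    return y;
  }
  return x;
}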
@@ -2876,31 +2898,16 @@ void CodeGeneratorARMVIXL::GenerateConditionWithZero(IfCondition condition,
void LocationsBuilderARMVIXL::HandleCondition(HCondition* cond) {
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(cond, LocationSummary::kNoCall);
- // Handle the long/FP comparisons made in instruction simplification.
- switch (cond->InputAt(0)->GetType()) {
- case DataType::Type::kInt64:
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
- if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
- break;
-
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
- if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
- break;
-
- default:
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
- if (!cond->IsEmittedAtUseSite()) {
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- }
+ const DataType::Type type = cond->InputAt(0)->GetType();
+ if (DataType::IsFloatingPointType(type)) {
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, ArithmeticZeroOrFpuRegister(cond->InputAt(1)));
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ }
+ if (!cond->IsEmittedAtUseSite()) {
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
}
@@ -3128,7 +3135,7 @@ void LocationsBuilderARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
void InstructionCodeGeneratorARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 3);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 3);
}
void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
@@ -3159,7 +3166,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrD
DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 4);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 4);
return;
}
@@ -3167,7 +3174,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrD
codegen_->GenerateStaticOrDirectCall(
invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 5);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 5);
}
void LocationsBuilderARMVIXL::HandleInvoke(HInvoke* invoke) {
@@ -3186,14 +3193,14 @@ void LocationsBuilderARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) {
void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) {
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 6);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 6);
return;
}
codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 7);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 7);
}
void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -3271,7 +3278,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv
DCHECK(!codegen_->IsLeafMethod());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 8);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 8);
}
void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
@@ -3280,7 +3287,7 @@ void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke)
void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
codegen_->GenerateInvokePolymorphicCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 9);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 9);
}
void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
@@ -3289,7 +3296,7 @@ void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
void InstructionCodeGeneratorARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
codegen_->GenerateInvokeCustomCall(invoke);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 10);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 10);
}
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
@@ -4006,7 +4013,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOpera
int64_t magic;
int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
// TODO(VIXL): Change the static cast to Operand::From() after VIXL is fixed.
__ Mov(temp1, static_cast<int32_t>(magic));
@@ -4414,7 +4421,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxFloat(HInstruction* minmax,
__ Vcmp(op1, op2);
__ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- __ B(vs, &nan, /* far_target */ false); // if un-ordered, go to NaN handling.
+ __ B(vs, &nan, /* is_far_target= */ false); // if un-ordered, go to NaN handling.
// op1 <> op2
vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4426,7 +4433,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxFloat(HInstruction* minmax,
__ vmov(cond, F32, out, op2);
}
// for <>(not equal), we've done min/max calculation.
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
// handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0).
__ Vmov(temp1, op1);
@@ -4471,7 +4478,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxDouble(HInstruction* minmax,
__ Vcmp(op1, op2);
__ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR);
- __ B(vs, &handle_nan_eq, /* far_target */ false); // if un-ordered, go to NaN handling.
+ __ B(vs, &handle_nan_eq, /* is_far_target= */ false); // if un-ordered, go to NaN handling.
// op1 <> op2
vixl32::ConditionType cond = is_min ? gt : lt;
@@ -4483,7 +4490,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxDouble(HInstruction* minmax,
__ vmov(cond, F64, out, op2);
}
// for <>(not equal), we've done min/max calculation.
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
// handle op1 == op2, max(+0.0,-0.0).
if (!is_min) {
@@ -4707,7 +4714,7 @@ void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) {
__ And(shift_right, RegisterFrom(rhs), 0x1F);
__ Lsrs(shift_left, RegisterFrom(rhs), 6);
__ Rsb(LeaveFlags, shift_left, shift_right, Operand::From(kArmBitsPerWord));
- __ B(cc, &shift_by_32_plus_shift_right, /* far_target */ false);
+ __ B(cc, &shift_by_32_plus_shift_right, /* is_far_target= */ false);
// out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right).
// out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right).
@@ -4964,7 +4971,7 @@ void InstructionCodeGeneratorARMVIXL::HandleShift(HBinaryOperation* op) {
__ Rrx(o_l, low);
}
} else {
- DCHECK(2 <= shift_value && shift_value < 32) << shift_value;
+ DCHECK(0 <= shift_value && shift_value < 32) << shift_value;
if (op->IsShl()) {
__ Lsl(o_h, high, shift_value);
__ Orr(o_h, o_h, Operand(low, ShiftType::LSR, 32 - shift_value));
@@ -5023,7 +5030,7 @@ void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) {
void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction) {
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 11);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 11);
}
void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
@@ -5036,14 +5043,12 @@ void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 12);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 12);
}
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
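Both the ARM64 and ARM hunks now hand the HNewArray instruction to CodeGenerator::GetArrayAllocationEntrypoint rather than the resolved class handle, so the shared helper can pick the entrypoint from the array type without the backend dereferencing the handle. An illustrative sketch of such a selection by component size; the enum and function are stand-ins, not the real QuickEntrypointEnum values:

#include <cstddef>

enum class ArrayAllocEntrypoint { k8, k16, k32, k64 };

ArrayAllocEntrypoint PickArrayAllocEntrypoint(std::size_t component_size) {
  switch (component_size) {
    case 1:  return ArrayAllocEntrypoint::k8;
    case 2:  return ArrayAllocEntrypoint::k16;
    case 4:  return ArrayAllocEntrypoint::k32;
    default: return ArrayAllocEntrypoint::k64;
  }
}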
@@ -5165,8 +5170,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) {
}
case DataType::Type::kInt64: {
__ Cmp(HighRegisterFrom(left), HighRegisterFrom(right)); // Signed compare.
- __ B(lt, &less, /* far_target */ false);
- __ B(gt, &greater, /* far_target */ false);
+ __ B(lt, &less, /* is_far_target= */ false);
+ __ B(gt, &greater, /* is_far_target= */ false);
// Emit move to `out` before the last `Cmp`, as `Mov` might affect the status flags.
__ Mov(out, 0);
__ Cmp(LowRegisterFrom(left), LowRegisterFrom(right)); // Unsigned compare.
@@ -5187,8 +5192,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) {
UNREACHABLE();
}
- __ B(eq, final_label, /* far_target */ false);
- __ B(less_cond, &less, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
+ __ B(less_cond, &less, /* is_far_target= */ false);
__ Bind(&greater);
__ Mov(out, 1);
@@ -5603,7 +5608,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
// Note that a potential implicit null check is handled in this
// CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, base, offset, maybe_temp, /* needs_null_check */ true);
+ instruction, out, base, offset, maybe_temp, /* needs_null_check= */ true);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5959,7 +5964,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- __ B(cs, &uncompressed_load, /* far_target */ false);
+ __ B(cs, &uncompressed_load, /* is_far_target= */ false);
GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
RegisterFrom(out_loc),
obj,
@@ -6001,7 +6006,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
__ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- __ B(cs, &uncompressed_load, /* far_target */ false);
+ __ B(cs, &uncompressed_load, /* is_far_target= */ false);
__ Ldrb(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 0));
__ B(final_label);
__ Bind(&uncompressed_load);
@@ -6041,11 +6046,11 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
obj,
data_offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
Location temp = locations->GetTemp(0);
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- out_loc, obj, data_offset, index, temp, /* needs_null_check */ false);
+ out_loc, obj, data_offset, index, temp, /* needs_null_check= */ false);
}
} else {
vixl32::Register out = OutputRegister(instruction);
@@ -6320,7 +6325,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
if (instruction->StaticTypeOfArrayIsObjectArray()) {
vixl32::Label do_put;
- __ B(eq, &do_put, /* far_target */ false);
+ __ B(eq, &do_put, /* is_far_target= */ false);
// If heap poisoning is enabled, the `temp1` reference has
// not been unpoisoned yet; unpoison it now.
GetAssembler()->MaybeUnpoisonHeapReference(temp1);
@@ -6622,7 +6627,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSuspendCheck(HSuspendCheck* instructi
return;
}
GenerateSuspendCheck(instruction, nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 13);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 13);
}
void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction,
@@ -6970,7 +6975,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 14);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 14);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -7009,14 +7014,14 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
codegen_->EmitMovwMovtPlaceholder(labels, out);
- __ Ldr(out, MemOperand(out, /* offset */ 0));
+ __ Ldr(out, MemOperand(out, /* offset= */ 0));
break;
}
case HLoadClass::LoadKind::kBssEntry: {
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
- codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -7032,7 +7037,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
cls->GetTypeIndex(),
cls->GetClass()));
// /* GcRoot<mirror::Class> */ out = *out
- codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
+ codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
break;
}
case HLoadClass::LoadKind::kRuntimeCall:
@@ -7054,7 +7059,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
} else {
__ Bind(slow_path->GetExitLabel());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 15);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 15);
}
}
@@ -7235,7 +7240,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
codegen_->EmitMovwMovtPlaceholder(labels, out);
- __ Ldr(out, MemOperand(out, /* offset */ 0));
+ __ Ldr(out, MemOperand(out, /* offset= */ 0));
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -7244,13 +7249,13 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
codegen_->EmitMovwMovtPlaceholder(labels, out);
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 16);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 16);
return;
}
case HLoadString::LoadKind::kJitBootImageAddress: {
@@ -7265,7 +7270,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
return;
}
default:
@@ -7278,7 +7283,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
__ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 17);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 17);
}
static int32_t GetExceptionTlsOffset() {
@@ -7410,7 +7415,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
if (instruction->MustDoNullCheck()) {
DCHECK(!out.Is(obj));
__ Mov(out, 0);
- __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
}
switch (type_check_kind) {
@@ -7442,7 +7447,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
__ it(eq);
__ mov(eq, out, 1);
} else {
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
__ Mov(out, 1);
}
@@ -7470,9 +7475,9 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
maybe_temp_loc,
read_barrier_option);
// If `out` is null, we use it for the result, and jump to the final label.
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
__ Cmp(out, cls);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
__ Mov(out, 1);
break;
}
@@ -7491,7 +7496,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
vixl32::Label loop, success;
__ Bind(&loop);
__ Cmp(out, cls);
- __ B(eq, &success, /* far_target */ false);
+ __ B(eq, &success, /* is_far_target= */ false);
// /* HeapReference<Class> */ out = out->super_class_
GenerateReferenceLoadOneRegister(instruction,
out_loc,
@@ -7501,7 +7506,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
// This is essentially a null check, but it sets the condition flags to the
// proper value for the code that follows the loop, i.e. not `eq`.
__ Cmp(out, 1);
- __ B(hs, &loop, /* far_target */ false);
+ __ B(hs, &loop, /* is_far_target= */ false);
// Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8,
// we check that the output is in a low register, so that a 16-bit MOV
@@ -7546,7 +7551,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
// Do an exact check.
vixl32::Label exact_check;
__ Cmp(out, cls);
- __ B(eq, &exact_check, /* far_target */ false);
+ __ B(eq, &exact_check, /* is_far_target= */ false);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ out = out->component_type_
GenerateReferenceLoadOneRegister(instruction,
@@ -7555,7 +7560,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
maybe_temp_loc,
read_barrier_option);
// If `out` is null, we use it for the result, and jump to the final label.
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
__ Cmp(out, 0);
@@ -7577,7 +7582,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
__ it(eq);
__ mov(eq, out, 1);
} else {
- __ B(ne, final_label, /* far_target */ false);
+ __ B(ne, final_label, /* is_far_target= */ false);
__ Bind(&exact_check);
__ Mov(out, 1);
}
@@ -7597,7 +7602,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
__ Cmp(out, cls);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(ne, slow_path->GetEntryLabel());
__ Mov(out, 1);
@@ -7626,7 +7631,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction)
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7711,7 +7716,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
// Avoid null check if we know obj is not null.
if (instruction->MustDoNullCheck()) {
- __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false);
}
switch (type_check_kind) {
@@ -7758,7 +7763,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
// Otherwise, compare the classes.
__ Cmp(temp, cls);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
break;
}
@@ -7775,7 +7780,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
vixl32::Label loop;
__ Bind(&loop);
__ Cmp(temp, cls);
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// /* HeapReference<Class> */ temp = temp->super_class_
GenerateReferenceLoadOneRegister(instruction,
@@ -7803,7 +7808,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
// Do an exact check.
__ Cmp(temp, cls);
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// Otherwise, we need to check that the object's class is a non-primitive array.
// /* HeapReference<Class> */ temp = temp->component_type_
@@ -7867,7 +7872,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
__ Sub(RegisterFrom(maybe_temp2_loc), RegisterFrom(maybe_temp2_loc), 2);
// Compare the classes and continue the loop if they do not match.
__ Cmp(cls, RegisterFrom(maybe_temp3_loc));
- __ B(ne, &start_loop, /* far_target */ false);
+ __ B(ne, &start_loop, /* is_far_target= */ false);
break;
}
@@ -7908,7 +7913,7 @@ void InstructionCodeGeneratorARMVIXL::VisitMonitorOperation(HMonitorOperation* i
} else {
CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 18);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 18);
}
void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) {
@@ -8263,7 +8268,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, out_reg, offset, maybe_temp, /* needs_null_check */ false);
+ instruction, out, out_reg, offset, maybe_temp, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -8298,7 +8303,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check */ false);
+ instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -8379,7 +8384,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 19);
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 19);
}
void CodeGeneratorARMVIXL::GenerateUnsafeCasOldValueAddWithBakerReadBarrier(
@@ -8479,7 +8484,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
: BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 20, /* temp_loc= */ LocationFrom(ip));
}
void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -8567,7 +8572,7 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref,
DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
+ MaybeGenerateMarkingRegisterCheck(/* code= */ 21, /* temp_loc= */ LocationFrom(ip));
}
void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
@@ -8650,7 +8655,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruct
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARMVIXL::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -8810,12 +8815,12 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageIntrinsicPatch(
uint32_t intrinsic_data) {
- return NewPcRelativePatch(/* dex_file */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
+ return NewPcRelativePatch(/* dex_file= */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
}
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch(
uint32_t boot_image_offset) {
- return NewPcRelativePatch(/* dex_file */ nullptr,
+ return NewPcRelativePatch(/* dex_file= */ nullptr,
boot_image_offset,
&boot_image_method_patches_);
}
@@ -8886,7 +8891,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
[this]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+ return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
});
}
@@ -8897,7 +8902,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFil
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
[this]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u);
+ return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u);
});
}
@@ -8907,11 +8912,11 @@ void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg,
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
NewBootImageIntrinsicPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
NewBootImageRelRoPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
- __ Ldr(reg, MemOperand(reg, /* offset */ 0));
+ __ Ldr(reg, MemOperand(reg, /* offset= */ 0));
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -9056,7 +9061,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal(
return map->GetOrCreate(
value,
[this, value]() {
- return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ value);
+ return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ value);
});
}
@@ -9283,9 +9288,9 @@ void CodeGeneratorARMVIXL::EmitMovwMovtPlaceholder(
CodeBufferCheckScope::kMaximumSize);
// TODO(VIXL): Think about using mov instead of movw.
__ bind(&labels->movw_label);
- __ movw(out, /* placeholder */ 0u);
+ __ movw(out, /* operand= */ 0u);
__ bind(&labels->movt_label);
- __ movt(out, /* placeholder */ 0u);
+ __ movt(out, /* operand= */ 0u);
__ bind(&labels->add_pc_label);
__ add(out, out, pc);
}
@@ -9308,7 +9313,7 @@ static void EmitGrayCheckAndFastPath(ArmVIXLAssembler& assembler,
static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
__ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted));
- __ B(ne, slow_path, /* is_far_target */ false);
+ __ B(ne, slow_path, /* is_far_target= */ false);
// To throw NPE, we return to the fast path; the artificial dependence below does not matter.
if (throw_npe != nullptr) {
__ Bind(throw_npe);
@@ -9355,7 +9360,7 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
vixl32::Label* throw_npe = nullptr;
if (GetCompilerOptions().GetImplicitNullChecks() && holder_reg.Is(base_reg)) {
throw_npe = &throw_npe_label;
- __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target */ false);
+ __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target= */ false);
}
// Check if the holder is gray and, if not, add fake dependency to the base register
// and return to the LDR instruction to load the reference. Otherwise, use introspection
@@ -9432,7 +9437,7 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
UseScratchRegisterScope temps(assembler.GetVIXLAssembler());
temps.Exclude(ip);
vixl32::Label return_label, not_marked, forwarding_address;
- __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false);
+ __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target= */ false);
MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value());
__ Ldr(ip, lock_word);
__ Tst(ip, LockWord::kMarkBitStateMaskShifted);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 33502d4f68..5edca87147 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -547,7 +547,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index d74a7a760f..f7f37db26a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -147,7 +147,7 @@ Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(DataType::Type t
case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
- break;
+ UNREACHABLE();
}
// Space on the stack is reserved for all arguments.
@@ -587,7 +587,7 @@ class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS {
mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
instruction_,
this,
- /* direct */ false);
+ /* direct= */ false);
}
__ B(GetExitLabel());
}
@@ -681,7 +681,7 @@ class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS {
mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
instruction_,
this,
- /* direct */ false);
+ /* direct= */ false);
// If the new reference is different from the old reference,
// update the field in the holder (`*(obj_ + field_offset_)`).
@@ -1167,9 +1167,9 @@ void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
__ Move(r2_l, TMP);
__ Move(r2_h, AT);
} else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
- Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false);
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ false);
} else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
- Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true);
+ Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ true);
} else if (loc1.IsSIMDStackSlot() && loc2.IsSIMDStackSlot()) {
ExchangeQuadSlots(loc1.GetStackIndex(), loc2.GetStackIndex());
} else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
@@ -1654,14 +1654,14 @@ CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageIntrinsic
uint32_t intrinsic_data,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+ /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+ /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
}
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageMethodPatch(
@@ -1737,7 +1737,7 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo
__ Bind(&info_high->label);
__ Bind(&info_high->pc_rel_label);
// Add the high half of a 32-bit offset to PC.
- __ Auipc(out, /* placeholder */ 0x1234);
+ __ Auipc(out, /* imm16= */ 0x1234);
__ SetReorder(reordering);
} else {
// If base is ZERO, emit NAL to obtain the actual base.
@@ -1746,7 +1746,7 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo
__ Nal();
}
__ Bind(&info_high->label);
- __ Lui(out, /* placeholder */ 0x1234);
+ __ Lui(out, /* imm16= */ 0x1234);
// If we emitted the NAL, bind the pc_rel_label, otherwise base is a register holding
// the HMipsComputeBaseMethodAddress which has its own label stored in MipsAssembler.
if (base == ZERO) {
@@ -1764,13 +1764,13 @@ void CodeGeneratorMIPS::LoadBootImageAddress(Register reg, uint32_t boot_image_r
if (GetCompilerOptions().IsBootImage()) {
PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
- EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base */ ZERO);
- __ Addiu(reg, TMP, /* placeholder */ 0x5678, &info_low->label);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base= */ ZERO);
+ __ Addiu(reg, TMP, /* imm16= */ 0x5678, &info_low->label);
+ } else if (GetCompilerOptions().GetCompilePic()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
- EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
- __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base= */ ZERO);
+ __ Lw(reg, reg, /* imm16= */ 0x5678, &info_low->label);
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
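On MIPS the boot-image address is still built from a high and a low half: the 0x1234 and 0x5678 immediates above are placeholders that get patched with the two halves of a PC-relative offset, where the high half must absorb the carry from the sign-extended low half. A small stand-alone sketch of that split, not the ART patching code:

#include <cstdint>

// The low half is sign-extended by ADDIU/LW, so the high half must include a
// carry for the sum to reconstruct the original offset.
void SplitPcRelOffset(int32_t offset, uint16_t* hi, uint16_t* lo) {
  *lo = static_cast<uint16_t>(offset & 0xffff);
  *hi = static_cast<uint16_t>((static_cast<uint32_t>(offset) + 0x8000u) >> 16);
  // Invariant: (*hi << 16) + static_cast<int16_t>(*lo) == offset.
}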
@@ -1793,8 +1793,8 @@ void CodeGeneratorMIPS::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invo
PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx);
PcRelativePatchInfo* info_low =
NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
- EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base */ ZERO);
- __ Addiu(argument, argument, /* placeholder */ 0x5678, &info_low->label);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base= */ ZERO);
+ __ Addiu(argument, argument, /* imm16= */ 0x5678, &info_low->label);
} else {
LoadBootImageAddress(argument, boot_image_offset);
}
@@ -2579,7 +2579,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Or(dst_high, dst_high, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(dst_high, dst_low);
__ Move(dst_low, ZERO);
} else {
@@ -2595,7 +2595,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Or(dst_low, dst_low, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(dst_low, dst_high);
__ Sra(dst_high, dst_high, 31);
} else {
@@ -2612,7 +2612,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Or(dst_low, dst_low, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(dst_low, dst_high);
__ Move(dst_high, ZERO);
} else {
@@ -2631,7 +2631,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
__ Or(dst_high, dst_high, TMP);
__ Andi(TMP, rhs_reg, kMipsBitsPerWord);
if (isR6) {
- __ Beqzc(TMP, &done, /* is_bare */ true);
+ __ Beqzc(TMP, &done, /* is_bare= */ true);
__ Move(TMP, dst_high);
__ Move(dst_high, dst_low);
__ Move(dst_low, TMP);
@@ -2862,7 +2862,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
obj,
offset,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
out_loc,
@@ -2870,7 +2870,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
data_offset,
index,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
}
} else {
Register out = out_loc.AsRegister<Register>();
@@ -4104,7 +4104,7 @@ void InstructionCodeGeneratorMIPS::GenerateDivRemWithAnyConstant(HBinaryOperatio
int64_t magic;
int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, false /* is_long= */, &magic, &shift);
bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
@@ -5948,7 +5948,7 @@ void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -5967,9 +5967,9 @@ void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCodeMIPS* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
// This function returns true if a conditional move can be generated for HSelect.
@@ -5983,7 +5983,7 @@ void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
// of common logic.
static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
HCondition* condition = cond->AsCondition();
DataType::Type cond_type =
@@ -6216,7 +6216,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) {
Location src = locations->InAt(1);
Register src_reg = ZERO;
Register src_reg_high = ZERO;
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
Register cond_reg = TMP;
int cond_cc = 0;
DataType::Type cond_type = DataType::Type::kInt32;
@@ -6224,7 +6224,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) {
DataType::Type dst_type = select->GetType();
if (IsBooleanValueOrMaterializedCondition(cond)) {
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+ cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
} else {
HCondition* condition = cond->AsCondition();
LocationSummary* cond_locations = cond->GetLocations();
@@ -6337,7 +6337,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
Location dst = locations->Out();
Location false_src = locations->InAt(0);
Location true_src = locations->InAt(1);
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
Register cond_reg = TMP;
FRegister fcond_reg = FTMP;
DataType::Type cond_type = DataType::Type::kInt32;
@@ -6345,7 +6345,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
DataType::Type dst_type = select->GetType();
if (IsBooleanValueOrMaterializedCondition(cond)) {
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>();
+ cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
} else {
HCondition* condition = cond->AsCondition();
LocationSummary* cond_locations = cond->GetLocations();
@@ -6526,7 +6526,7 @@ void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
- if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) {
+ if (CanMoveConditionally(select, is_r6, /* locations_to_set= */ nullptr)) {
if (is_r6) {
GenConditionalMoveR6(select);
} else {
@@ -6536,8 +6536,8 @@ void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
LocationSummary* locations = select->GetLocations();
MipsLabel false_target;
GenerateTestAndBranch(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
+ /* condition_input_index= */ 2,
+ /* true_target= */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
@@ -6696,7 +6696,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
obj,
offset,
temp_loc,
- /* needs_null_check */ true);
+ /* needs_null_check= */ true);
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -6929,7 +6929,7 @@ void InstructionCodeGeneratorMIPS::GenerateReferenceLoadOneRegister(
out_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -6970,7 +6970,7 @@ void InstructionCodeGeneratorMIPS::GenerateReferenceLoadTwoRegisters(
obj_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7061,7 +7061,7 @@ void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruc
__ AddUpper(base, obj, offset_high);
}
MipsLabel skip_call;
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
if (label_low != nullptr) {
DCHECK(short_offset);
__ Bind(label_low);
@@ -7216,11 +7216,11 @@ void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* inst
MipsLabel skip_call;
if (short_offset) {
if (isR6) {
- __ Beqzc(T9, &skip_call, /* is_bare */ true);
+ __ Beqzc(T9, &skip_call, /* is_bare= */ true);
__ Nop(); // In forbidden slot.
__ Jialc(T9, thunk_disp);
} else {
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Addiu(T9, T9, thunk_disp); // In delay slot.
__ Jalr(T9);
__ Nop(); // In delay slot.
@@ -7228,13 +7228,13 @@ void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* inst
__ Bind(&skip_call);
} else {
if (isR6) {
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Aui(base, obj, offset_high); // In delay slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
} else {
__ Lui(base, offset_high);
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Addiu(T9, T9, thunk_disp); // In delay slot.
__ Jalr(T9);
__ Bind(&skip_call);
@@ -7311,7 +7311,7 @@ void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* inst
// We will not do the explicit null check in the thunk as some form of a null check
// must've been done earlier.
DCHECK(!needs_null_check);
- const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+ const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
// Loading the entrypoint does not require a load acquire since it is only changed when
// threads are suspended or running a checkpoint.
__ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
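As the comments above note, T9 caches the read-barrier mark entrypoint, which the runtime keeps null while the GC is not marking; the Beqz(T9, ...)/Jialc(T9, thunk_disp) pairs are therefore a null test plus an optional thunk call. A rough semantic sketch only (the emitted code achieves this through the introspection thunk, delay slots, and bare branches shown above; the helper shape below is assumed, not ART's API):

// Semantic model, not the actual instruction sequence.
template <typename Ref>
Ref LoadReferenceWithBakerBarrier(Ref (*mark_entrypoint)(Ref), Ref* slot) {
  Ref ref = *slot;                   // Plain reference load on the fast path.
  if (mark_entrypoint != nullptr) {  // Non-null only while the GC is marking.
    ref = mark_entrypoint(ref);      // Mark/forward the reference before use.
  }
  return ref;
}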
@@ -7321,13 +7321,13 @@ void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* inst
: index.AsRegister<Register>();
MipsLabel skip_call;
if (GetInstructionSetFeatures().IsR6()) {
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Lsa(TMP, index_reg, obj, scale_factor); // In delay slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
} else {
__ Sll(TMP, index_reg, scale_factor);
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Addiu(T9, T9, thunk_disp); // In delay slot.
__ Jalr(T9);
__ Bind(&skip_call);
@@ -7442,7 +7442,7 @@ void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
ref,
obj,
- /* field_offset */ index,
+ /* field_offset= */ index,
temp_reg);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
@@ -7705,7 +7705,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ Bne(out, cls.AsRegister<Register>(), slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -7734,7 +7734,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ B(slow_path->GetEntryLabel());
break;
@@ -7964,7 +7964,7 @@ Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticO
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -8001,7 +8001,7 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
Register temp_reg = temp.AsRegister<Register>();
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
- __ Addiu(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ __ Addiu(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -8010,7 +8010,7 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
Register temp_reg = temp.AsRegister<Register>();
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
- __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -8020,7 +8020,7 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
Register temp_reg = temp.AsRegister<Register>();
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
- __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -8226,7 +8226,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -8239,7 +8239,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
break;
}
case HLoadClass::LoadKind::kBssEntry: {
@@ -8253,7 +8253,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
GenerateGcRootFieldLoad(cls,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
read_barrier_option,
&info_low->label);
generate_null_check = true;
@@ -8278,12 +8278,12 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
cls->GetClass());
bool reordering = __ SetReorder(false);
__ Bind(&info->high_label);
- __ Lui(out, /* placeholder */ 0x1234);
+ __ Lui(out, /* imm16= */ 0x1234);
__ SetReorder(reordering);
GenerateGcRootFieldLoad(cls,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
read_barrier_option,
&info->low_label);
break;
@@ -8432,7 +8432,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
return;
}
case HLoadString::LoadKind::kBootImageRelRo: {
@@ -8445,7 +8445,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
out,
base_or_current_method_reg);
- __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label);
+ __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -8460,7 +8460,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
GenerateGcRootFieldLoad(load,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
@@ -8489,12 +8489,12 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
load->GetString());
bool reordering = __ SetReorder(false);
__ Bind(&info->high_label);
- __ Lui(out, /* placeholder */ 0x1234);
+ __ Lui(out, /* imm16= */ 0x1234);
__ SetReorder(reordering);
GenerateGcRootFieldLoad(load,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
kCompilerReadBarrierOption,
&info->low_label);
return;
@@ -8702,10 +8702,8 @@ void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes care
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index bf9589331b..50807310b6 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -563,7 +563,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 7c89808d54..8b6328f097 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -953,7 +953,7 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
: CodeGenerator(graph,
kNumberOfGpuRegisters,
kNumberOfFpuRegisters,
- /* number_of_register_pairs */ 0,
+ /* number_of_register_pairs= */ 0,
ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
arraysize(kCoreCalleeSaves)),
ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
@@ -1581,14 +1581,14 @@ CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageIntri
uint32_t intrinsic_data,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+ /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high) {
return NewPcRelativePatch(
- /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
+ /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
}
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageMethodPatch(
@@ -1665,7 +1665,7 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn
DCHECK(!info_high->patch_info_high);
__ Bind(&info_high->label);
// Add the high half of a 32-bit offset to PC.
- __ Auipc(out, /* placeholder */ 0x1234);
+ __ Auipc(out, /* imm16= */ 0x1234);
// A following instruction will add the sign-extended low half of the 32-bit
// offset to `out` (e.g. ld, jialc, daddiu).
if (info_low != nullptr) {
@@ -1679,13 +1679,13 @@ void CodeGeneratorMIPS64::LoadBootImageAddress(GpuRegister reg, uint32_t boot_im
PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(reg, AT, /* placeholder */ 0x5678);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ __ Daddiu(reg, AT, /* imm16= */ 0x5678);
+ } else if (GetCompilerOptions().GetCompilePic()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
- __ Lwu(reg, AT, /* placeholder */ 0x5678);
+ __ Lwu(reg, AT, /* imm16= */ 0x5678);
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1710,7 +1710,7 @@ void CodeGeneratorMIPS64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* in
PcRelativePatchInfo* info_low =
NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(argument, AT, /* placeholder */ 0x5678);
+ __ Daddiu(argument, AT, /* imm16= */ 0x5678);
} else {
LoadBootImageAddress(argument, boot_image_offset);
}
@@ -1724,7 +1724,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_fil
ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
return jit_string_patches_.GetOrCreate(
StringReference(&dex_file, string_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
}
Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
@@ -1733,7 +1733,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file
ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
return jit_class_patches_.GetOrCreate(
TypeReference(&dex_file, type_index),
- [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
+ [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
}
void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
@@ -2458,7 +2458,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
obj,
offset,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
out_loc,
@@ -2466,7 +2466,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
data_offset,
index,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
}
} else {
GpuRegister out = out_loc.AsRegister<GpuRegister>();
@@ -3337,10 +3337,10 @@ void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) {
switch (type) {
default:
// Integer case.
- GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations);
+ GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ false, locations);
return;
case DataType::Type::kInt64:
- GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations);
+ GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ true, locations);
return;
case DataType::Type::kFloat32:
case DataType::Type::kFloat64:
@@ -3642,7 +3642,7 @@ void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instructio
if (!DataType::IsIntegralType(type)) {
LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
- return;
+ UNREACHABLE();
}
if (value.IsConstant()) {
@@ -4449,10 +4449,10 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
switch (type) {
default:
- GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target);
+ GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ false, locations, branch_target);
break;
case DataType::Type::kInt64:
- GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target);
+ GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ true, locations, branch_target);
break;
case DataType::Type::kFloat32:
case DataType::Type::kFloat64:
@@ -4482,7 +4482,7 @@ void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
Mips64Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -4501,9 +4501,9 @@ void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCodeMIPS64* slow_path =
deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
GenerateTestAndBranch(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
// This function returns true if a conditional move can be generated for HSelect.
@@ -4517,7 +4517,7 @@ void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
// of common logic.
static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_set) {
bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
HCondition* condition = cond->AsCondition();
DataType::Type cond_type =
@@ -4660,7 +4660,7 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) {
Location dst = locations->Out();
Location false_src = locations->InAt(0);
Location true_src = locations->InAt(1);
- HInstruction* cond = select->InputAt(/* condition_input_index */ 2);
+ HInstruction* cond = select->InputAt(/* i= */ 2);
GpuRegister cond_reg = TMP;
FpuRegister fcond_reg = FTMP;
DataType::Type cond_type = DataType::Type::kInt32;
@@ -4668,7 +4668,7 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) {
DataType::Type dst_type = select->GetType();
if (IsBooleanValueOrMaterializedCondition(cond)) {
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<GpuRegister>();
+ cond_reg = locations->InAt(/* at= */ 2).AsRegister<GpuRegister>();
} else {
HCondition* condition = cond->AsCondition();
LocationSummary* cond_locations = cond->GetLocations();
@@ -4677,13 +4677,13 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) {
switch (cond_type) {
default:
cond_inverted = MaterializeIntLongCompare(if_cond,
- /* is64bit */ false,
+ /* is64bit= */ false,
cond_locations,
cond_reg);
break;
case DataType::Type::kInt64:
cond_inverted = MaterializeIntLongCompare(if_cond,
- /* is64bit */ true,
+ /* is64bit= */ true,
cond_locations,
cond_reg);
break;
@@ -4826,14 +4826,14 @@ void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
}
void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) {
- if (CanMoveConditionally(select, /* locations_to_set */ nullptr)) {
+ if (CanMoveConditionally(select, /* locations_to_set= */ nullptr)) {
GenConditionalMove(select);
} else {
LocationSummary* locations = select->GetLocations();
Mips64Label false_target;
GenerateTestAndBranch(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
+ /* condition_input_index= */ 2,
+ /* true_target= */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
@@ -4945,7 +4945,7 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
obj,
offset,
temp_loc,
- /* needs_null_check */ true);
+ /* needs_null_check= */ true);
if (is_volatile) {
GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5101,7 +5101,7 @@ void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadOneRegister(
out_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -5142,7 +5142,7 @@ void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadTwoRegisters(
obj_reg,
offset,
maybe_temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -5230,7 +5230,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instr
__ Daui(base, obj, offset_high);
}
Mips64Label skip_call;
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
if (label_low != nullptr) {
DCHECK(short_offset);
__ Bind(label_low);
@@ -5360,7 +5360,7 @@ void CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* in
GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
Mips64Label skip_call;
if (short_offset) {
- __ Beqzc(T9, &skip_call, /* is_bare */ true);
+ __ Beqzc(T9, &skip_call, /* is_bare= */ true);
__ Nop(); // In forbidden slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
@@ -5369,7 +5369,7 @@ void CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* in
} else {
int16_t offset_low = Low16Bits(offset);
int16_t offset_high = High16Bits(offset - offset_low); // Accounts for sign extension in lwu.
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
__ Daui(TMP, obj, offset_high); // In delay slot.
__ Jialc(T9, thunk_disp);
__ Bind(&skip_call);
@@ -5442,12 +5442,12 @@ void CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* in
// We will not do the explicit null check in the thunk as some form of a null check
// must've been done earlier.
DCHECK(!needs_null_check);
- const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false);
+ const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
// Loading the entrypoint does not require a load acquire since it is only changed when
// threads are suspended or running a checkpoint.
__ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
Mips64Label skip_call;
- __ Beqz(T9, &skip_call, /* is_bare */ true);
+ __ Beqz(T9, &skip_call, /* is_bare= */ true);
GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
GpuRegister index_reg = index.AsRegister<GpuRegister>();
__ Dlsa(TMP, index_reg, obj, scale_factor); // In delay slot.
@@ -5558,7 +5558,7 @@ void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
ref,
obj,
- /* field_offset */ index,
+ /* field_offset= */ index,
temp_reg);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
@@ -5821,7 +5821,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
kWithoutReadBarrier);
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ Bnec(out, cls.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
__ LoadConst32(out, 1);
@@ -5850,7 +5850,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ Bc(slow_path->GetEntryLabel());
break;
@@ -6059,7 +6059,7 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
// On MIPS64 we support all dispatch types.
return desired_dispatch_info;
}
@@ -6092,7 +6092,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
@@ -6101,7 +6101,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
- __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
@@ -6110,7 +6110,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
PcRelativePatchInfo* info_low = NewMethodBssEntryPatch(
MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
+ __ Ld(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -6280,7 +6280,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(out, AT, /* placeholder */ 0x5678);
+ __ Daddiu(out, AT, /* imm16= */ 0x5678);
break;
}
case HLoadClass::LoadKind::kBootImageRelRo: {
@@ -6291,7 +6291,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ __ Lwu(out, AT, /* imm16= */ 0x5678);
break;
}
case HLoadClass::LoadKind::kBssEntry: {
@@ -6303,7 +6303,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
GenerateGcRootFieldLoad(cls,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
read_barrier_option,
&info_low->label);
generate_null_check = true;
@@ -6427,7 +6427,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Daddiu(out, AT, /* placeholder */ 0x5678);
+ __ Daddiu(out, AT, /* imm16= */ 0x5678);
return;
}
case HLoadString::LoadKind::kBootImageRelRo: {
@@ -6438,7 +6438,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
- __ Lwu(out, AT, /* placeholder */ 0x5678);
+ __ Lwu(out, AT, /* imm16= */ 0x5678);
return;
}
case HLoadString::LoadKind::kBssEntry: {
@@ -6451,7 +6451,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
GenerateGcRootFieldLoad(load,
out_loc,
out,
- /* placeholder */ 0x5678,
+ /* offset= */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
@@ -6633,10 +6633,8 @@ void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes care
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index ddc154d40f..52f3a62f33 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -541,7 +541,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 43169ba7eb..5a18c1f72b 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -216,7 +216,7 @@ void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Addv(dst.S(), src.V4S());
break;
@@ -230,7 +230,7 @@ void InstructionCodeGeneratorARM64::VisitVecReduce(HVecReduce* instruction) {
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Addp(dst.D(), src.V2D());
break;
@@ -1277,6 +1277,74 @@ void InstructionCodeGeneratorARM64::VisitVecSADAccumulate(HVecSADAccumulate* ins
}
}
+void LocationsBuilderARM64::VisitVecDotProd(HVecDotProd* instruction) {
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ DCHECK(instruction->GetPackedType() == DataType::Type::kInt32);
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(2, Location::RequiresFpuRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+
+ // For Int8 and Uint8 we need a temp register.
+ if (DataType::Size(instruction->InputAt(1)->AsVecOperation()->GetPackedType()) == 1) {
+ locations->AddTemp(Location::RequiresFpuRegister());
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitVecDotProd(HVecDotProd* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ VRegister acc = VRegisterFrom(locations->InAt(0));
+ VRegister left = VRegisterFrom(locations->InAt(1));
+ VRegister right = VRegisterFrom(locations->InAt(2));
+ HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
+ HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
+ DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
+ HVecOperation::ToSignedType(b->GetPackedType()));
+ DCHECK_EQ(instruction->GetPackedType(), DataType::Type::kInt32);
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+
+ size_t inputs_data_size = DataType::Size(a->GetPackedType());
+ switch (inputs_data_size) {
+ case 1u: {
+ DCHECK_EQ(16u, a->GetVectorLength());
+ VRegister tmp = VRegisterFrom(locations->GetTemp(0));
+ if (instruction->IsZeroExtending()) {
+ // TODO: Use Armv8.4-A UDOT instruction when it is available.
+ __ Umull(tmp.V8H(), left.V8B(), right.V8B());
+ __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+
+ __ Umull2(tmp.V8H(), left.V16B(), right.V16B());
+ __ Uaddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Uaddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+ } else {
+ // TODO: Use Armv8.4-A SDOT instruction when it is available.
+ __ Smull(tmp.V8H(), left.V8B(), right.V8B());
+ __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+
+ __ Smull2(tmp.V8H(), left.V16B(), right.V16B());
+ __ Saddw(acc.V4S(), acc.V4S(), tmp.V4H());
+ __ Saddw2(acc.V4S(), acc.V4S(), tmp.V8H());
+ }
+ break;
+ }
+ case 2u:
+ DCHECK_EQ(8u, a->GetVectorLength());
+ if (instruction->IsZeroExtending()) {
+ __ Umlal(acc.V4S(), left.V4H(), right.V4H());
+ __ Umlal2(acc.V4S(), left.V8H(), right.V8H());
+ } else {
+ __ Smlal(acc.V4S(), left.V4H(), right.V4H());
+ __ Smlal2(acc.V4S(), left.V8H(), right.V8H());
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unsupported SIMD type size: " << inputs_data_size;
+ }
+}
+
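The new VisitVecDotProd lowering above emulates a byte/half dot product with widening multiplies and widening adds until the UDOT/SDOT instructions mentioned in the TODOs are used. A scalar sketch of the signed 16 x int8 case (which accumulator lane each product lands in does not matter for the idiom, since it ends with a VecReduce summing all four lanes; illustrative code, not ART's):

#include <cstdint>

// One dot-product accumulation step over 16 signed bytes into a 4-lane int32
// accumulator, mirroring the Smull/Saddw/Saddw2 sequence above.
void DotProdAccumulateS8(int32_t acc[4], const int8_t a[16], const int8_t b[16]) {
  for (int j = 0; j < 16; ++j) {
    acc[j & 3] += static_cast<int32_t>(a[j]) * static_cast<int32_t>(b[j]);
  }
}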
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index 7b66b17983..b092961a56 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -138,7 +138,7 @@ void InstructionCodeGeneratorARMVIXL::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Vpadd(DataTypeValue::I32, dst, src, src);
break;
@@ -854,6 +854,14 @@ void InstructionCodeGeneratorARMVIXL::VisitVecSADAccumulate(HVecSADAccumulate* i
}
}
+void LocationsBuilderARMVIXL::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Return whether the vector memory access operation is guaranteed to be word-aligned (ARM word
// size equals to 4).
static bool IsWordAligned(HVecMemoryOperation* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index df0e1485d6..4e9ba0d3d2 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -74,19 +74,19 @@ void InstructionCodeGeneratorMIPS::VisitVecReplicateScalar(HVecReplicateScalar*
__ InsertW(static_cast<VectorRegister>(FTMP),
locations->InAt(0).AsRegisterPairHigh<Register>(),
1);
- __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double */ true);
+ __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double= */ true);
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FRegister>(),
- /* is_double */ false);
+ /* is_double= */ false);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FRegister>(),
- /* is_double */ true);
+ /* is_double= */ true);
break;
default:
LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -187,7 +187,7 @@ void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Hadd_sD(tmp, src, src);
__ IlvlD(dst, tmp, tmp);
@@ -209,7 +209,7 @@ void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ IlvlD(dst, src, src);
__ AddvD(dst, dst, src);
@@ -1274,6 +1274,14 @@ void InstructionCodeGeneratorMIPS::VisitVecSADAccumulate(HVecSADAccumulate* inst
}
}
+void LocationsBuilderMIPS::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
@@ -1336,7 +1344,7 @@ int32_t InstructionCodeGeneratorMIPS::VecAddress(LocationSummary* locations,
}
void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
}
void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
@@ -1379,7 +1387,7 @@ void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
}
void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index de354b63a1..6467d3e27f 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -79,13 +79,13 @@ void InstructionCodeGeneratorMIPS64::VisitVecReplicateScalar(HVecReplicateScalar
DCHECK_EQ(4u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FpuRegister>(),
- /* is_double */ false);
+ /* is_double= */ false);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
__ ReplicateFPToVectorRegister(dst,
locations->InAt(0).AsFpuRegister<FpuRegister>(),
- /* is_double */ true);
+ /* is_double= */ true);
break;
default:
LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -185,7 +185,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ Hadd_sD(tmp, src, src);
__ IlvlD(dst, tmp, tmp);
@@ -207,7 +207,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ IlvlD(dst, src, src);
__ AddvD(dst, dst, src);
@@ -1272,6 +1272,14 @@ void InstructionCodeGeneratorMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* in
}
}
+void LocationsBuilderMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
@@ -1334,7 +1342,7 @@ int32_t InstructionCodeGeneratorMIPS64::VecAddress(LocationSummary* locations,
}
void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
}
void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
@@ -1377,7 +1385,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
}
void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
- CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false);
+ CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
}
void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 2502275b3a..0ee00356b9 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -205,8 +205,8 @@ void LocationsBuilderX86::VisitVecReduce(HVecReduce* instruction) {
CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
- instruction->GetKind() == HVecReduce::kMin ||
- instruction->GetKind() == HVecReduce::kMax) {
+ instruction->GetReductionKind() == HVecReduce::kMin ||
+ instruction->GetReductionKind() == HVecReduce::kMax) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
}
}
@@ -218,38 +218,23 @@ void InstructionCodeGeneratorX86::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ movaps(dst, src);
__ phaddd(dst, dst);
__ phaddd(dst, dst);
break;
- case HVecReduce::kMin: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- __ movaps(tmp, src);
- __ movaps(dst, src);
- __ psrldq(tmp, Immediate(8));
- __ pminsd(dst, tmp);
- __ psrldq(tmp, Immediate(4));
- __ pminsd(dst, tmp);
- break;
- }
- case HVecReduce::kMax: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- __ movaps(tmp, src);
- __ movaps(dst, src);
- __ psrldq(tmp, Immediate(8));
- __ pmaxsd(dst, tmp);
- __ psrldq(tmp, Immediate(4));
- __ pmaxsd(dst, tmp);
- break;
- }
+ case HVecReduce::kMin:
+ case HVecReduce::kMax:
+ // Historical note: We've had a broken implementation here. b/117863065
+ // Do not draw on the old code if we ever want to bring MIN/MAX reduction back.
+ LOG(FATAL) << "Unsupported reduction type.";
}
break;
case DataType::Type::kInt64: {
DCHECK_EQ(2u, instruction->GetVectorLength());
XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ movaps(tmp, src);
__ movaps(dst, src);
@@ -1143,6 +1128,14 @@ void InstructionCodeGeneratorX86::VisitVecSADAccumulate(HVecSADAccumulate* instr
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderX86::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 4a67dafd8a..9c2882766c 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -188,8 +188,8 @@ void LocationsBuilderX86_64::VisitVecReduce(HVecReduce* instruction) {
CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
// Long reduction or min/max require a temporary.
if (instruction->GetPackedType() == DataType::Type::kInt64 ||
- instruction->GetKind() == HVecReduce::kMin ||
- instruction->GetKind() == HVecReduce::kMax) {
+ instruction->GetReductionKind() == HVecReduce::kMin ||
+ instruction->GetReductionKind() == HVecReduce::kMax) {
instruction->GetLocations()->AddTemp(Location::RequiresFpuRegister());
}
}
@@ -201,38 +201,23 @@ void InstructionCodeGeneratorX86_64::VisitVecReduce(HVecReduce* instruction) {
switch (instruction->GetPackedType()) {
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ movaps(dst, src);
__ phaddd(dst, dst);
__ phaddd(dst, dst);
break;
- case HVecReduce::kMin: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- __ movaps(tmp, src);
- __ movaps(dst, src);
- __ psrldq(tmp, Immediate(8));
- __ pminsd(dst, tmp);
- __ psrldq(tmp, Immediate(4));
- __ pminsd(dst, tmp);
- break;
- }
- case HVecReduce::kMax: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- __ movaps(tmp, src);
- __ movaps(dst, src);
- __ psrldq(tmp, Immediate(8));
- __ pmaxsd(dst, tmp);
- __ psrldq(tmp, Immediate(4));
- __ pmaxsd(dst, tmp);
- break;
- }
+ case HVecReduce::kMin:
+ case HVecReduce::kMax:
+ // Historical note: We've had a broken implementation here. b/117863065
+ // Do not draw on the old code if we ever want to bring MIN/MAX reduction back.
+ LOG(FATAL) << "Unsupported reduction type.";
}
break;
case DataType::Type::kInt64: {
DCHECK_EQ(2u, instruction->GetVectorLength());
XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- switch (instruction->GetKind()) {
+ switch (instruction->GetReductionKind()) {
case HVecReduce::kSum:
__ movaps(tmp, src);
__ movaps(dst, src);
@@ -1116,6 +1101,14 @@ void InstructionCodeGeneratorX86_64::VisitVecSADAccumulate(HVecSADAccumulate* in
LOG(FATAL) << "No SIMD for " << instruction->GetId();
}
+void LocationsBuilderX86_64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
+void InstructionCodeGeneratorX86_64::VisitVecDotProd(HVecDotProd* instruction) {
+ LOG(FATAL) << "No SIMD for " << instruction->GetId();
+}
+
// Helper to set up locations for vector memory operations.
static void CreateVecMemLocations(ArenaAllocator* allocator,
HVecMemoryOperation* instruction,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6a27081dab..766ff78fa4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1228,7 +1228,7 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type ty
case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
- break;
+ UNREACHABLE();
}
return Location::NoLocation();
}
@@ -1720,7 +1720,7 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1738,9 +1738,9 @@ void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
GenerateTestAndBranch<Label>(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1863,7 +1863,7 @@ void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) {
} else {
NearLabel false_target;
GenerateTestAndBranch<NearLabel>(
- select, /* condition_input_index */ 2, /* true_target */ nullptr, &false_target);
+ select, /* condition_input_index= */ 2, /* true_target= */ nullptr, &false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
}
@@ -2989,7 +2989,7 @@ void LocationsBuilderX86::VisitAdd(HAdd* add) {
default:
LOG(FATAL) << "Unexpected add type " << add->GetResultType();
- break;
+ UNREACHABLE();
}
}
@@ -3434,8 +3434,8 @@ void InstructionCodeGeneratorX86::GenerateRemFP(HRem *rem) {
// Load the values to the FP stack in reverse order, using temporaries if needed.
const bool is_wide = !is_float;
- PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp */ true, is_wide);
- PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp */ true, is_wide);
+ PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp= */ true, is_wide);
+ PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp= */ true, is_wide);
// Loop doing FPREM until we stabilize.
NearLabel retry;
@@ -3497,6 +3497,27 @@ void InstructionCodeGeneratorX86::DivRemOneOrMinusOne(HBinaryOperation* instruct
}
}
+void InstructionCodeGeneratorX86::RemByPowerOfTwo(HRem* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+
+ Register out = locations->Out().AsRegister<Register>();
+ Register numerator = locations->InAt(0).AsRegister<Register>();
+
+ int32_t imm = Int64FromConstant(second.GetConstant());
+ DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+ uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
+
+ Register tmp = locations->GetTemp(0).AsRegister<Register>();
+ NearLabel done;
+ __ movl(out, numerator);
+ __ andl(out, Immediate(abs_imm - 1));
+ __ j(Condition::kZero, &done);
+ __ leal(tmp, Address(out, static_cast<int32_t>(~(abs_imm - 1))));
+ __ testl(numerator, numerator);
+ __ cmovl(Condition::kLess, out, tmp);
+ __ Bind(&done);
+}
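The new RemByPowerOfTwo above computes n % d for |d| == 2^k without a division: it masks the low bits and, for a negative numerator with a non-zero low part, folds the result back to a negative remainder with a single cmov. A scalar sketch of the same logic (illustrative names; unsigned arithmetic mirrors the wrapping leal):

#include <cstdint>

// Model of the andl/leal/cmovl sequence above, for abs_d a power of two.
int32_t RemPowerOfTwoModel(int32_t n, uint32_t abs_d) {
  uint32_t mask = abs_d - 1u;
  uint32_t low = static_cast<uint32_t>(n) & mask;  // |remainder| when n >= 0.
  if (low == 0u) {
    return 0;  // Exact multiple: remainder is zero regardless of sign.
  }
  uint32_t adjusted = low - abs_d;  // Same wrap-around as leal(out + ~mask).
  return static_cast<int32_t>(n < 0 ? adjusted : low);  // cmovl on the numerator's sign.
}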
void InstructionCodeGeneratorX86::DivByPowerOfTwo(HDiv* instruction) {
LocationSummary* locations = instruction->GetLocations();
@@ -3551,7 +3572,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation
int64_t magic;
int shift;
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
// Save the numerator.
__ movl(num, eax);
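Both this hunk and the MIPS GenerateDivRemWithAnyConstant hunk earlier rely on the standard reciprocal ("magic number") technique behind CalculateMagicAndShiftForDivRem. A worked sketch for divisor 7, using the well-known Hacker's Delight constants (illustrative only, not the ART helper; it assumes arithmetic right shifts of negative values, as on the targets discussed):

#include <cstdint>

// Signed 32-bit division by 7 via multiplication: M = 0x92492493, shift = 2.
int32_t DivBy7(int32_t n) {
  const int32_t magic = static_cast<int32_t>(0x92492493);
  int32_t q = static_cast<int32_t>((static_cast<int64_t>(magic) * n) >> 32);  // High half.
  q += n;   // Correction step because M is negative for this divisor.
  q >>= 2;  // Arithmetic shift by the computed 'shift'.
  q += static_cast<int32_t>(static_cast<uint32_t>(q) >> 31);  // +1 for negative quotients.
  return q;  // For HRem, the remainder is n - q * 7.
}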
@@ -3610,8 +3631,12 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
// Do not generate anything for 0. DivZeroCheck would forbid any generated code.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (is_div && IsPowerOfTwo(AbsOrMin(imm))) {
- DivByPowerOfTwo(instruction->AsDiv());
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ if (is_div) {
+ DivByPowerOfTwo(instruction->AsDiv());
+ } else {
+ RemByPowerOfTwo(instruction->AsRem());
+ }
} else {
DCHECK(imm <= -2 || imm >= 2);
GenerateDivRemWithAnyConstant(instruction);
@@ -4525,10 +4550,8 @@ void LocationsBuilderX86::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorX86::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
@@ -4778,14 +4801,14 @@ void CodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) {
}
case MemBarrierKind::kNTStoreStore:
// Non-Temporal Store/Store needs an explicit fence.
- MemoryFence(/* non-temporal */ true);
+ MemoryFence(/* non-temporal= */ true);
break;
}
}
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -4913,14 +4936,14 @@ void CodeGeneratorX86::GenerateVirtualCall(
void CodeGeneratorX86::RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t intrinsic_data) {
boot_image_intrinsic_patches_.emplace_back(
- method_address, /* target_dex_file */ nullptr, intrinsic_data);
+ method_address, /* target_dex_file= */ nullptr, intrinsic_data);
__ Bind(&boot_image_intrinsic_patches_.back().label);
}
void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t boot_image_offset) {
boot_image_method_patches_.emplace_back(
- method_address, /* target_dex_file */ nullptr, boot_image_offset);
+ method_address, /* target_dex_file= */ nullptr, boot_image_offset);
__ Bind(&boot_image_method_patches_.back().label);
}
@@ -4988,7 +5011,7 @@ void CodeGeneratorX86::LoadBootImageAddress(Register reg,
invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
__ leal(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
RecordBootImageIntrinsicPatch(method_address, boot_image_reference);
- } else if (Runtime::Current()->IsAotCompiler()) {
+ } else if (GetCompilerOptions().GetCompilePic()) {
DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
HX86ComputeBaseMethodAddress* method_address =
invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
@@ -5214,7 +5237,7 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, base, offset, /* needs_null_check */ true);
+ instruction, out, base, offset, /* needs_null_check= */ true);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5697,7 +5720,7 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+ instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
} else {
Register out = out_loc.AsRegister<Register>();
__ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -6559,7 +6582,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
cls,
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
- /* fixup_label */ nullptr,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
@@ -7086,7 +7109,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
}
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -7118,7 +7141,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -7426,6 +7449,61 @@ void InstructionCodeGeneratorX86::VisitMonitorOperation(HMonitorOperation* instr
}
}
+void LocationsBuilderX86::VisitX86AndNot(HX86AndNot* instruction) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+ DCHECK(DataType::IsIntOrLongType(instruction->GetType())) << instruction->GetType();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86::VisitX86AndNot(HX86AndNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location dest = locations->Out();
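+ // The BMI andn instruction folds the negation and the AND into a single operation;
+ // 64-bit inputs are processed pairwise on the low and high halves of the register pairs.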
+ if (instruction->GetResultType() == DataType::Type::kInt32) {
+ __ andn(dest.AsRegister<Register>(),
+ first.AsRegister<Register>(),
+ second.AsRegister<Register>());
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
+ __ andn(dest.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ second.AsRegisterPairLow<Register>());
+ __ andn(dest.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ second.AsRegisterPairHigh<Register>());
+ }
+}
+
+void LocationsBuilderX86::VisitX86MaskOrResetLeastSetBit(HX86MaskOrResetLeastSetBit* instruction) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+ DCHECK(instruction->GetType() == DataType::Type::kInt32) << instruction->GetType();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86::VisitX86MaskOrResetLeastSetBit(
+ HX86MaskOrResetLeastSetBit* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location src = locations->InAt(0);
+ Location dest = locations->Out();
+ DCHECK(instruction->GetResultType() == DataType::Type::kInt32);
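+ // kAnd maps to blsr (src & (src - 1): reset the lowest set bit); kXor maps to blsmsk
+ // (src ^ (src - 1): mask up to and including the lowest set bit).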
+ switch (instruction->GetOpKind()) {
+ case HInstruction::kAnd:
+ __ blsr(dest.AsRegister<Register>(), src.AsRegister<Register>());
+ break;
+ case HInstruction::kXor:
+ __ blsmsk(dest.AsRegister<Register>(), src.AsRegister<Register>());
+ break;
+ default:
+ LOG(FATAL) << "Unreachable";
+ }
+}
+
void LocationsBuilderX86::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderX86::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderX86::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
@@ -7572,7 +7650,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, out_reg, offset, /* needs_null_check */ false);
+ instruction, out, out_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -7606,7 +7684,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, obj_reg, offset, /* needs_null_check */ false);
+ instruction, out, obj_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7655,7 +7733,7 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
- instruction, root, /* unpoison_ref_before_marking */ false);
+ instruction, root, /* unpoison_ref_before_marking= */ false);
codegen_->AddSlowPath(slow_path);
// Test the entrypoint (`Thread::Current()->pReadBarrierMarkReg ## root.reg()`).
@@ -7785,10 +7863,10 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i
if (always_update_field) {
DCHECK(temp != nullptr);
slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86(
- instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp);
+ instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86(
- instruction, ref, /* unpoison_ref_before_marking */ true);
+ instruction, ref, /* unpoison_ref_before_marking= */ true);
}
AddSlowPath(slow_path);
@@ -8301,7 +8379,7 @@ void CodeGeneratorX86::PatchJitRootUse(uint8_t* code,
uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
uintptr_t address =
reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
dchecked_integral_cast<uint32_t>(address);
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 615477171b..deeef888e2 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -159,6 +159,7 @@ class LocationsBuilderX86 : public HGraphVisitor {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -190,6 +191,7 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -216,6 +218,7 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivByPowerOfTwo(HDiv* instruction);
+ void RemByPowerOfTwo(HRem* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateRemFP(HRem* rem);
void HandleCondition(HCondition* condition);
@@ -410,7 +413,7 @@ class CodeGeneratorX86 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 489652b85b..67a2aa561b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -978,7 +978,7 @@ inline Condition X86_64FPCondition(IfCondition cond) {
HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke ATTRIBUTE_UNUSED) {
+ ArtMethod* method ATTRIBUTE_UNUSED) {
return desired_dispatch_info;
}
@@ -992,7 +992,7 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
// temp = thread->string_init_entrypoint
uint32_t offset =
GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
- __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip */ true));
+ __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
@@ -1001,19 +1001,19 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
DCHECK(GetCompilerOptions().IsBootImage());
__ leal(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageMethodPatch(invoke);
break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
__ movl(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageRelRoPatch(GetBootImageOffset(invoke));
break;
}
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
__ movq(temp.AsRegister<CpuRegister>(),
- Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
RecordMethodBssEntryPatch(invoke);
break;
}
@@ -1076,12 +1076,12 @@ void CodeGeneratorX86_64::GenerateVirtualCall(
}
void CodeGeneratorX86_64::RecordBootImageIntrinsicPatch(uint32_t intrinsic_data) {
- boot_image_intrinsic_patches_.emplace_back(/* target_dex_file */ nullptr, intrinsic_data);
+ boot_image_intrinsic_patches_.emplace_back(/* target_dex_file= */ nullptr, intrinsic_data);
__ Bind(&boot_image_intrinsic_patches_.back().label);
}
void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset) {
- boot_image_method_patches_.emplace_back(/* target_dex_file */ nullptr, boot_image_offset);
+ boot_image_method_patches_.emplace_back(/* target_dex_file= */ nullptr, boot_image_offset);
__ Bind(&boot_image_method_patches_.back().label);
}
@@ -1123,10 +1123,10 @@ Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference) {
if (GetCompilerOptions().IsBootImage()) {
- __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageIntrinsicPatch(boot_image_reference);
- } else if (Runtime::Current()->IsAotCompiler()) {
- __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ } else if (GetCompilerOptions().GetCompilePic()) {
+ __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
RecordBootImageRelRoPatch(boot_image_reference);
} else {
DCHECK(Runtime::Current()->UseJitCompilation());
@@ -1146,7 +1146,7 @@ void CodeGeneratorX86_64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* in
DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
// Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
__ leal(argument,
- Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
MethodReference target_method = invoke->GetTargetMethod();
dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
boot_image_type_patches_.emplace_back(target_method.dex_file, type_idx.index_);
@@ -1277,7 +1277,7 @@ void CodeGeneratorX86_64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_poin
}
void CodeGeneratorX86_64::GenerateInvokeRuntime(int32_t entry_point_offset) {
- __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true));
+ __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip= */ true));
}
static constexpr int kNumberOfCpuRegisterPairs = 0;
@@ -1799,7 +1799,7 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
nullptr : codegen_->GetLabelOf(true_successor);
Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
nullptr : codegen_->GetLabelOf(false_successor);
- GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
+ GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
}
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
@@ -1817,9 +1817,9 @@ void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86_64>(deoptimize);
GenerateTestAndBranch<Label>(deoptimize,
- /* condition_input_index */ 0,
+ /* condition_input_index= */ 0,
slow_path->GetEntryLabel(),
- /* false_target */ nullptr);
+ /* false_target= */ nullptr);
}
void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
@@ -1922,8 +1922,8 @@ void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) {
} else {
NearLabel false_target;
GenerateTestAndBranch<NearLabel>(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
+ /* condition_input_index= */ 2,
+ /* true_target= */ nullptr,
&false_target);
codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
__ Bind(&false_target);
@@ -2425,7 +2425,7 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(DataType::Type
case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
- break;
+ UNREACHABLE();
}
return Location::NoLocation();
}
@@ -3560,7 +3560,40 @@ void InstructionCodeGeneratorX86_64::DivRemOneOrMinusOne(HBinaryOperation* instr
LOG(FATAL) << "Unexpected type for div by (-)1 " << instruction->GetResultType();
}
}
+void InstructionCodeGeneratorX86_64::RemByPowerOfTwo(HRem* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location second = locations->InAt(1);
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister numerator = locations->InAt(0).AsRegister<CpuRegister>();
+ int64_t imm = Int64FromConstant(second.GetConstant());
+ DCHECK(IsPowerOfTwo(AbsOrMin(imm)));
+ uint64_t abs_imm = AbsOrMin(imm);
+ CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ if (instruction->GetResultType() == DataType::Type::kInt32) {
+ NearLabel done;
+ __ movl(out, numerator);
+ __ andl(out, Immediate(abs_imm - 1));
+ __ j(Condition::kZero, &done);
+ __ leal(tmp, Address(out, static_cast<int32_t>(~(abs_imm - 1))));
+ __ testl(numerator, numerator);
+ __ cmov(Condition::kLess, out, tmp, false);
+ __ Bind(&done);
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
+ codegen_->Load64BitValue(tmp, abs_imm - 1);
+ NearLabel done;
+ __ movq(out, numerator);
+ __ andq(out, tmp);
+ __ j(Condition::kZero, &done);
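+ // For a negative numerator, (numerator >> 63) shifted left by log2(abs_imm) is -abs_imm, so
+ // the orq below turns the masked bits into out - abs_imm; for a non-negative numerator the
+ // shifted mask is zero and the orq is a no-op.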
+ __ movq(tmp, numerator);
+ __ sarq(tmp, Immediate(63));
+ __ shlq(tmp, Immediate(WhichPowerOf2(abs_imm)));
+ __ orq(out, tmp);
+ __ Bind(&done);
+ }
+}
void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
LocationSummary* locations = instruction->GetLocations();
Location second = locations->InAt(1);
@@ -3575,9 +3608,17 @@ void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
if (instruction->GetResultType() == DataType::Type::kInt32) {
- __ leal(tmp, Address(numerator, abs_imm - 1));
- __ testl(numerator, numerator);
- __ cmov(kGreaterEqual, tmp, numerator);
+ // When the denominator is 2, it is enough to add the numerator's sign bit to the numerator.
+ // Using addl here instead of cmov saves one cycle.
+ if (abs_imm == 2) {
+ __ leal(tmp, Address(numerator, 0));
+ __ shrl(tmp, Immediate(31));
+ __ addl(tmp, numerator);
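+ // e.g. numerator == -7: tmp = 1 (sign bit), tmp + numerator = -6, and -6 >> 1 = -3 == -7 / 2.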
+ } else {
+ __ leal(tmp, Address(numerator, abs_imm - 1));
+ __ testl(numerator, numerator);
+ __ cmov(kGreaterEqual, tmp, numerator);
+ }
int shift = CTZ(imm);
__ sarl(tmp, Immediate(shift));
@@ -3589,11 +3630,16 @@ void InstructionCodeGeneratorX86_64::DivByPowerOfTwo(HDiv* instruction) {
} else {
DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
CpuRegister rdx = locations->GetTemp(0).AsRegister<CpuRegister>();
-
- codegen_->Load64BitValue(rdx, abs_imm - 1);
- __ addq(rdx, numerator);
- __ testq(numerator, numerator);
- __ cmov(kGreaterEqual, rdx, numerator);
+ if (abs_imm == 2) {
+ __ movq(rdx, numerator);
+ __ shrq(rdx, Immediate(63));
+ __ addq(rdx, numerator);
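+ // Same trick as the 32-bit case above, using the 64-bit sign bit (numerator >> 63).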
+ } else {
+ codegen_->Load64BitValue(rdx, abs_imm - 1);
+ __ addq(rdx, numerator);
+ __ testq(numerator, numerator);
+ __ cmov(kGreaterEqual, rdx, numerator);
+ }
int shift = CTZ(imm);
__ sarq(rdx, Immediate(shift));
@@ -3633,7 +3679,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat
if (instruction->GetResultType() == DataType::Type::kInt32) {
int imm = second.GetConstant()->AsIntConstant()->GetValue();
- CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);
__ movl(numerator, eax);
@@ -3670,7 +3716,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat
CpuRegister rax = eax;
CpuRegister rdx = edx;
- CalculateMagicAndShiftForDivRem(imm, true /* is_long */, &magic, &shift);
+ CalculateMagicAndShiftForDivRem(imm, /* is_long= */ true, &magic, &shift);
// Save the numerator.
__ movq(numerator, rax);
@@ -3737,8 +3783,12 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in
// Do not generate anything. DivZeroCheck would prevent any code to be executed.
} else if (imm == 1 || imm == -1) {
DivRemOneOrMinusOne(instruction);
- } else if (instruction->IsDiv() && IsPowerOfTwo(AbsOrMin(imm))) {
- DivByPowerOfTwo(instruction->AsDiv());
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ if (is_div) {
+ DivByPowerOfTwo(instruction->AsDiv());
+ } else {
+ RemByPowerOfTwo(instruction->AsRem());
+ }
} else {
DCHECK(imm <= -2 || imm >= 2);
GenerateDivRemWithAnyConstant(instruction);
@@ -4371,10 +4421,8 @@ void LocationsBuilderX86_64::VisitNewArray(HNewArray* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitNewArray(HNewArray* instruction) {
- // Note: if heap poisoning is enabled, the entry point takes cares
- // of poisoning the reference.
- QuickEntrypointEnum entrypoint =
- CodeGenerator::GetArrayAllocationEntrypoint(instruction->GetLoadClass()->GetClass());
+ // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
+ QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
@@ -4506,7 +4554,7 @@ void CodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) {
}
case MemBarrierKind::kNTStoreStore:
// Non-Temporal Store/Store needs an explicit fence.
- MemoryFence(/* non-temporal */ true);
+ MemoryFence(/* non-temporal= */ true);
break;
}
}
@@ -4583,7 +4631,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, base, offset, /* needs_null_check */ true);
+ instruction, out, base, offset, /* needs_null_check= */ true);
if (is_volatile) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
@@ -5038,7 +5086,7 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
- instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
+ instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true);
} else {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
__ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
@@ -5438,7 +5486,7 @@ void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
}
// Load the address of the card table into `card`.
__ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true));
+ /* no_rip= */ true));
// Calculate the offset (in the card table) of the card corresponding to
// `object`.
__ movq(temp, object);
@@ -5518,7 +5566,7 @@ void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruc
}
__ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true),
+ /* no_rip= */ true),
Immediate(0));
if (successor == nullptr) {
__ j(kNotEqual, slow_path->GetEntryLabel());
@@ -5900,25 +5948,25 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
cls,
out_loc,
Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()),
- /* fixup_label */ nullptr,
+ /* fixup_label= */ nullptr,
read_barrier_option);
break;
}
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageTypePatch(cls);
break;
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(cls));
break;
}
case HLoadClass::LoadKind::kBssEntry: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ false);
+ /* no_rip= */ false);
Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
@@ -5934,7 +5982,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
}
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ true);
+ /* no_rip= */ true);
Label* fixup_label =
codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass());
// /* GcRoot<mirror::Class> */ out = *address
@@ -6059,19 +6107,19 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
DCHECK(codegen_->GetCompilerOptions().IsBootImage());
- __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageStringPatch(load);
return;
}
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(load));
return;
}
case HLoadString::LoadKind::kBssEntry: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ false);
+ /* no_rip= */ false);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
@@ -6090,7 +6138,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
}
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
- /* no_rip */ true);
+ /* no_rip= */ true);
Label* fixup_label = codegen_->NewJitRootStringPatch(
load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
@@ -6112,7 +6160,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
static Address GetExceptionTlsAddress() {
return Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>().Int32Value(),
- /* no_rip */ true);
+ /* no_rip= */ true);
}
void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) {
@@ -6387,7 +6435,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
}
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ j(kNotEqual, slow_path->GetEntryLabel());
__ movl(out, Immediate(1));
@@ -6419,7 +6467,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
// This should also be beneficial for the other cases above.
DCHECK(locations->OnlyCallsOnSlowPath());
slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
- instruction, /* is_fatal */ false);
+ instruction, /* is_fatal= */ false);
codegen_->AddSlowPath(slow_path);
__ jmp(slow_path->GetEntryLabel());
if (zero.IsLinked()) {
@@ -6736,6 +6784,48 @@ void InstructionCodeGeneratorX86_64::VisitMonitorOperation(HMonitorOperation* in
}
}
+void LocationsBuilderX86_64::VisitX86AndNot(HX86AndNot* instruction) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+ DCHECK(DataType::IsIntOrLongType(instruction->GetType())) << instruction->GetType();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ // There is no immediate variant of the negated bitwise AND (andn) on X86.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void LocationsBuilderX86_64::VisitX86MaskOrResetLeastSetBit(
+ HX86MaskOrResetLeastSetBit* instruction) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasAVX2());
+ DCHECK(DataType::IsIntOrLongType(instruction->GetType())) << instruction->GetType();
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorX86_64::VisitX86AndNot(HX86AndNot* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location dest = locations->Out();
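+ // On x86_64 both int and long operands live in single 64-bit registers, so one andn suffices.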
+ __ andn(dest.AsRegister<CpuRegister>(),
+ first.AsRegister<CpuRegister>(),
+ second.AsRegister<CpuRegister>());
+}
+
+void InstructionCodeGeneratorX86_64::VisitX86MaskOrResetLeastSetBit(
+ HX86MaskOrResetLeastSetBit* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Location src = locations->InAt(0);
+ Location dest = locations->Out();
+ switch (instruction->GetOpKind()) {
+ case HInstruction::kAnd:
+ __ blsr(dest.AsRegister<CpuRegister>(), src.AsRegister<CpuRegister>());
+ break;
+ case HInstruction::kXor:
+ __ blsmsk(dest.AsRegister<CpuRegister>(), src.AsRegister<CpuRegister>());
+ break;
+ default:
+ LOG(FATAL) << "Unreachable";
+ }
+}
+
void LocationsBuilderX86_64::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderX86_64::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
void LocationsBuilderX86_64::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
@@ -6864,7 +6954,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, out_reg, offset, /* needs_null_check */ false);
+ instruction, out, out_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// Save the value of `out` into `maybe_temp` before overwriting it
@@ -6898,7 +6988,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters(
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- instruction, out, obj_reg, offset, /* needs_null_check */ false);
+ instruction, out, obj_reg, offset, /* needs_null_check= */ false);
} else {
// Load with slow path based read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -6947,13 +7037,13 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad(
// Slow path marking the GC root `root`.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
- instruction, root, /* unpoison_ref_before_marking */ false);
+ instruction, root, /* unpoison_ref_before_marking= */ false);
codegen_->AddSlowPath(slow_path);
// Test the `Thread::Current()->pReadBarrierMarkReg ## root.reg()` entrypoint.
const int32_t entry_point_offset =
Thread::ReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(root.reg());
- __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip */ true), Immediate(0));
+ __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip= */ true), Immediate(0));
// The entrypoint is null when the GC is not marking.
__ j(kNotEqual, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
@@ -7079,10 +7169,10 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction
DCHECK(temp1 != nullptr);
DCHECK(temp2 != nullptr);
slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64(
- instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2);
+ instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp1, *temp2);
} else {
slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64(
- instruction, ref, /* unpoison_ref_before_marking */ true);
+ instruction, ref, /* unpoison_ref_before_marking= */ true);
}
AddSlowPath(slow_path);
@@ -7542,7 +7632,7 @@ void CodeGeneratorX86_64::PatchJitRootUse(uint8_t* code,
uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
uintptr_t address =
reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
dchecked_integral_cast<uint32_t>(address);
}
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index f77a5c84b4..f74e130702 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -161,6 +161,7 @@ class LocationsBuilderX86_64 : public HGraphVisitor {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -192,6 +193,7 @@ class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
FOR_EACH_CONCRETE_INSTRUCTION_X86_64(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(DECLARE_VISIT_INSTRUCTION)
#undef DECLARE_VISIT_INSTRUCTION
@@ -213,6 +215,7 @@ class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator {
void GenerateRemFP(HRem* rem);
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivByPowerOfTwo(HDiv* instruction);
+ void RemByPowerOfTwo(HRem* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleCondition(HCondition* condition);
@@ -409,7 +412,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// otherwise return a fall-back info that should be used instead.
HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
- HInvokeStaticOrDirect* invoke) override;
+ ArtMethod* method) override;
void GenerateStaticOrDirectCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index d6c97552dc..f406983fc2 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -180,7 +180,7 @@ static HInstruction* FindIdealPosition(HInstruction* instruction,
DCHECK(!instruction->IsPhi()); // Makes no sense for Phi.
// Find the target block.
- CommonDominator finder(/* start_block */ nullptr);
+ CommonDominator finder(/* block= */ nullptr);
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) {
@@ -259,12 +259,12 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
size_t number_of_instructions = graph_->GetCurrentInstructionId();
ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
- ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
+ ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable= */ false);
processed_instructions.ClearAllBits();
- ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
+ ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false);
post_dominated.ClearAllBits();
ArenaBitVector instructions_that_can_move(
- &allocator, number_of_instructions, /* expandable */ false);
+ &allocator, number_of_instructions, /* expandable= */ false);
instructions_that_can_move.ClearAllBits();
ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));
@@ -414,7 +414,7 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
}
// Find the position of the instruction we're storing into, filtering out this
// store and all other stores to that instruction.
- position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true);
+ position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter= */ true);
// The position needs to be dominated by the store, in order for the store to move there.
if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) {
@@ -434,7 +434,7 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
continue;
}
MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSunk);
- instruction->MoveBefore(position, /* ensure_safety */ false);
+ instruction->MoveBefore(position, /* do_checks= */ false);
}
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index f186191a0f..b5a7c137f6 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -823,6 +823,33 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
InternalCodeAllocator code_allocator;
codegen.Finalize(&code_allocator);
}
+
+// Check that ART ISA features are propagated to VIXL for arm64 (using cortex-a75 as an example).
+TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA75) {
+ OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a75");
+ HGraph* graph = CreateGraph();
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
+
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kDotProduct));
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kFPHalf));
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kAtomics));
+}
+
+// Check that ART ISA features are propagated to VIXL for arm64 (using cortex-a53 as an example).
+TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA53) {
+ OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a53");
+ HGraph* graph = CreateGraph();
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
+
+ EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
+ EXPECT_FALSE(features->Has(vixl::CPUFeatures::kDotProduct));
+ EXPECT_FALSE(features->Has(vixl::CPUFeatures::kFPHalf));
+ EXPECT_FALSE(features->Has(vixl::CPUFeatures::kAtomics));
+}
+
#endif
#ifdef ART_ENABLE_CODEGEN_mips
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index b1436f863c..74d9d3a993 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -70,7 +70,7 @@ class ConstantFoldingTest : public OptimizingUnitTest {
check_after_cf(graph_);
- HDeadCodeElimination(graph_, nullptr /* stats */, "dead_code_elimination").Run();
+ HDeadCodeElimination(graph_, /* stats= */ nullptr, "dead_code_elimination").Run();
GraphChecker graph_checker_dce(graph_);
graph_checker_dce.Run();
ASSERT_TRUE(graph_checker_dce.IsValid());
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 3cb8bf2f47..3a1a9e023d 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -78,7 +78,7 @@ class CFREVisitor : public HGraphVisitor {
VisitSetLocation(instruction, value);
}
- void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) {
+ void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) override {
// Pessimize: Merge all fences.
MergeCandidateFences();
}
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 5ac6e46003..3cbcc9e0c3 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -231,6 +231,21 @@ class DataType {
}
}
+ static Type ToUnsigned(Type type) {
+ switch (type) {
+ case Type::kInt8:
+ return Type::kUint8;
+ case Type::kInt16:
+ return Type::kUint16;
+ case Type::kInt32:
+ return Type::kUint32;
+ case Type::kInt64:
+ return Type::kUint64;
+ default:
+ return type;
+ }
+ }
+
static const char* PrettyDescriptor(Type type);
private:
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 277453545a..f5cd4dc27a 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -43,7 +43,7 @@ void DeadCodeEliminationTest::TestCode(const std::vector<uint16_t>& data,
std::string actual_before = printer_before.str();
ASSERT_EQ(actual_before, expected_before);
- HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run();
+ HDeadCodeElimination(graph, /* stats= */ nullptr, "dead_code_elimination").Run();
GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index a689f35e0f..01d9603802 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -635,8 +635,8 @@ void GraphChecker::HandleTypeCheckInstruction(HTypeCheckInstruction* check) {
}
}
CheckTypeCheckBitstringInput(
- check, /* input_pos */ 2, check_values, expected_path_to_root, "path_to_root");
- CheckTypeCheckBitstringInput(check, /* input_pos */ 3, check_values, expected_mask, "mask");
+ check, /* input_pos= */ 2, check_values, expected_path_to_root, "path_to_root");
+ CheckTypeCheckBitstringInput(check, /* input_pos= */ 3, check_values, expected_mask, "mask");
} else {
if (!input->IsLoadClass()) {
AddError(StringPrintf("%s:%d (classic) expects a HLoadClass as second input, not %s:%d.",
@@ -931,7 +931,7 @@ void GraphChecker::VisitPhi(HPhi* phi) {
// because the BitVector reallocation strategy has very bad worst-case behavior.
ArenaBitVector visited(&allocator,
GetGraph()->GetCurrentInstructionId(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocGraphChecker);
visited.ClearAllBits();
if (!IsConstantEquivalent(phi, other_phi, &visited)) {
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 31db8c205f..2a7bbcb72f 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -106,8 +106,7 @@ std::ostream& operator<<(std::ostream& os, const StringList& list) {
}
}
-typedef Disassembler* create_disasm_prototype(InstructionSet instruction_set,
- DisassemblerOptions* options);
+using create_disasm_prototype = Disassembler*(InstructionSet, DisassemblerOptions*);
class HGraphVisualizerDisassembler {
public:
HGraphVisualizerDisassembler(InstructionSet instruction_set,
@@ -131,10 +130,10 @@ class HGraphVisualizerDisassembler {
// been generated, so we can read data in literal pools.
disassembler_ = std::unique_ptr<Disassembler>((*create_disassembler)(
instruction_set,
- new DisassemblerOptions(/* absolute_addresses */ false,
+ new DisassemblerOptions(/* absolute_addresses= */ false,
base_address,
end_address,
- /* can_read_literals */ true,
+ /* can_read_literals= */ true,
Is64BitInstructionSet(instruction_set)
? &Thread::DumpThreadOffset<PointerSize::k64>
: &Thread::DumpThreadOffset<PointerSize::k32>)));
@@ -394,7 +393,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void VisitLoadMethodType(HLoadMethodType* load_method_type) override {
StartAttributeStream("load_kind") << "RuntimeCall";
const DexFile& dex_file = load_method_type->GetDexFile();
- const DexFile::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
+ const dex::ProtoId& proto_id = dex_file.GetProtoId(load_method_type->GetProtoIndex());
StartAttributeStream("method_type") << dex_file.GetProtoSignature(proto_id);
}
@@ -564,6 +563,14 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("kind") << instruction->GetOpKind();
}
+ void VisitVecDotProd(HVecDotProd* instruction) override {
+ VisitVecOperation(instruction);
+ DataType::Type arg_type = instruction->InputAt(1)->AsVecOperation()->GetPackedType();
+ StartAttributeStream("type") << (instruction->IsZeroExtending() ?
+ DataType::ToUnsigned(arg_type) :
+ DataType::ToSigned(arg_type));
+ }
+
#if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) override {
StartAttributeStream("kind") << instruction->GetOpKind();
@@ -917,8 +924,8 @@ void HGraphVisualizer::DumpGraphWithDisassembly() const {
HGraphVisualizerPrinter printer(graph_,
*output_,
"disassembly",
- /* is_after_pass */ true,
- /* graph_in_bad_state */ false,
+ /* is_after_pass= */ true,
+ /* graph_in_bad_state= */ false,
codegen_,
codegen_.GetDisassemblyInformation());
printer.Run();
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index e6b6326726..3689d1d232 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -348,7 +348,7 @@ class GlobalValueNumberer : public ValueObject {
side_effects_(side_effects),
sets_(graph->GetBlocks().size(), nullptr, allocator_.Adapter(kArenaAllocGvn)),
visited_blocks_(
- &allocator_, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) {
+ &allocator_, graph->GetBlocks().size(), /* expandable= */ false, kArenaAllocGvn) {
visited_blocks_.ClearAllBits();
}
@@ -546,12 +546,12 @@ HBasicBlock* GlobalValueNumberer::FindVisitedBlockWithRecyclableSet(
// that is larger, we return it if no perfectly-matching set is found.
// Note that we defer testing WillBeReferencedAgain until all other criteria
// have been satisfied because it might be expensive.
- if (current_set->CanHoldCopyOf(reference_set, /* exact_match */ true)) {
+ if (current_set->CanHoldCopyOf(reference_set, /* exact_match= */ true)) {
if (!WillBeReferencedAgain(current_block)) {
return current_block;
}
} else if (secondary_match == nullptr &&
- current_set->CanHoldCopyOf(reference_set, /* exact_match */ false)) {
+ current_set->CanHoldCopyOf(reference_set, /* exact_match= */ false)) {
if (!WillBeReferencedAgain(current_block)) {
secondary_match = current_block;
}
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index a4d638f4c6..3a10d5831d 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -1074,8 +1074,8 @@ bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
&& lower_value >= upper_value;
default:
LOG(FATAL) << "CONDITION UNREACHABLE";
+ UNREACHABLE();
}
- return false; // not certain, may be untaken
}
bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
@@ -1099,8 +1099,8 @@ bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
return (IsAtLeast(upper_expr, &value) && value >= (min - stride_value));
default:
LOG(FATAL) << "CONDITION UNREACHABLE";
+ UNREACHABLE();
}
- return false; // not certain, may be infinite
}
bool HInductionVarAnalysis::FitsNarrowerControl(InductionInfo* lower_expr,
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 55eca2316a..4c78fa8f06 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -216,13 +216,13 @@ bool InductionVarRange::GetInductionRange(HInstruction* context,
chase_hint_ = chase_hint;
bool in_body = context->GetBlock() != loop->GetHeader();
int64_t stride_value = 0;
- *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
- *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false), chase_hint);
+ *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
+ *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min= */ false), chase_hint);
*needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip);
chase_hint_ = nullptr;
// Retry chasing constants for wrap-around (merge sensitive).
if (!min_val->is_known && info->induction_class == HInductionVarAnalysis::kWrapAround) {
- *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true));
+ *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true));
}
return true;
}
@@ -445,8 +445,8 @@ bool InductionVarRange::IsConstant(HInductionVarAnalysis::InductionInfo* info,
}
// Try range analysis on the invariant, only accept a proper range
// to avoid arithmetic wrap-around anomalies.
- Value min_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ true);
- Value max_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ false);
+ Value min_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ true);
+ Value max_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ false);
if (IsConstantValue(min_val) &&
IsConstantValue(max_val) && min_val.b_constant <= max_val.b_constant) {
if ((request == kExact && min_val.b_constant == max_val.b_constant) || request == kAtMost) {
@@ -791,10 +791,10 @@ InductionVarRange::Value InductionVarRange::GetMul(HInductionVarAnalysis::Induct
return MulRangeAndConstant(value, info1, trip, in_body, is_min);
}
// Interval ranges.
- Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
- Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
- Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
- Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+ Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+ Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+ Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+ Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
// Positive range vs. positive or negative range.
if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -825,10 +825,10 @@ InductionVarRange::Value InductionVarRange::GetDiv(HInductionVarAnalysis::Induct
return DivRangeAndConstant(value, info1, trip, in_body, is_min);
}
// Interval ranges.
- Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true);
- Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false);
- Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true);
- Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false);
+ Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true);
+ Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false);
+ Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true);
+ Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false);
// Positive range vs. positive or negative range.
if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) {
if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) {
@@ -1019,10 +1019,10 @@ bool InductionVarRange::GenerateRangeOrLastValue(HInstruction* context,
// Code generation for taken test: generate the code when requested or otherwise analyze
// if code generation is feasible when taken test is needed.
if (taken_test != nullptr) {
- return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min */ false);
+ return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min= */ false);
} else if (*needs_taken_test) {
if (!GenerateCode(
- trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min */ false)) {
+ trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min= */ false)) {
return false;
}
}
@@ -1030,9 +1030,9 @@ bool InductionVarRange::GenerateRangeOrLastValue(HInstruction* context,
return
// Success on lower if invariant (not set), or code can be generated.
((info->induction_class == HInductionVarAnalysis::kInvariant) ||
- GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) &&
+ GenerateCode(info, trip, graph, block, lower, in_body, /* is_min= */ true)) &&
// And success on upper.
- GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false);
+ GenerateCode(info, trip, graph, block, upper, in_body, /* is_min= */ false);
}
bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::InductionInfo* info,
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index e5bc6ef22c..f6af384af0 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -252,24 +252,24 @@ class InductionVarRangeTest : public OptimizingUnitTest {
Value GetMin(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip) {
- return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ true);
+ return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ true);
}
Value GetMax(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip) {
- return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ false);
+ return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ false);
}
Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
bool is_min) {
- return range_.GetMul(info1, info2, nullptr, /* in_body */ true, is_min);
+ return range_.GetMul(info1, info2, nullptr, /* in_body= */ true, is_min);
}
Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
bool is_min) {
- return range_.GetDiv(info1, info2, nullptr, /* in_body */ true, is_min);
+ return range_.GetDiv(info1, info2, nullptr, /* in_body= */ true, is_min);
}
Value GetRem(HInductionVarAnalysis::InductionInfo* info1,
@@ -701,7 +701,11 @@ TEST_F(InductionVarRangeTest, MaxValue) {
TEST_F(InductionVarRangeTest, ArrayLengthAndHints) {
// We pass a bogus constant for the class to avoid mocking one.
- HInstruction* new_array = new (GetAllocator()) HNewArray(x_, x_, 0);
+ HInstruction* new_array = new (GetAllocator()) HNewArray(
+ /* cls= */ x_,
+ /* length= */ x_,
+ /* dex_pc= */ 0,
+ /* component_size_shift= */ 0);
entry_block_->AddInstruction(new_array);
HInstruction* array_length = new (GetAllocator()) HArrayLength(new_array, 0);
entry_block_->AddInstruction(array_length);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3ba741472e..96d6d2a1ae 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -18,6 +18,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
+#include "base/logging.h"
#include "builder.h"
#include "class_linker.h"
#include "class_root.h"
@@ -27,7 +28,6 @@
#include "dex/inline_method_analyser.h"
#include "dex/verification_results.h"
#include "dex/verified_method.h"
-#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "instruction_simplifier.h"
@@ -36,8 +36,9 @@
#include "jit/jit_code_cache.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
+#include "mirror/object_array-alloc-inl.h"
+#include "mirror/object_array-inl.h"
#include "nodes.h"
-#include "optimizing_compiler.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
#include "scoped_thread_state_change-inl.h"
@@ -149,13 +150,13 @@ bool HInliner::Run() {
// If we're compiling with a core image (which is only used for
// test purposes), honor inlining directives in method names:
- // - if a method's name contains the substring "$inline$", ensure
- // that this method is actually inlined;
// - if a method's name contains the substring "$noinline$", do not
- // inline that method.
+ // inline that method;
+ // - if a method's name contains the substring "$inline$", ensure
+ // that this method is actually inlined.
// We limit the latter to AOT compilation, as the JIT may or may not inline
// depending on the state of classes at runtime.
- const bool honor_noinline_directives = IsCompilingWithCoreImage();
+ const bool honor_noinline_directives = codegen_->GetCompilerOptions().CompilingWithCoreImage();
const bool honor_inline_directives =
honor_noinline_directives && Runtime::Current()->IsAotCompiler();
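The directive handling described in the comment above reduces to a small name predicate. A simplified sketch in plain C++ (not the ART implementation; the real code additionally gates on CompilingWithCoreImage() and IsAotCompiler() as shown above):

#include <cassert>
#include <string>

// Simplified sketch of the naming directives honored for core-image test builds.
enum class Directive { kNone, kForceInline, kForbidInline };

static Directive DirectiveFromName(const std::string& name) {
  if (name.find("$noinline$") != std::string::npos) return Directive::kForbidInline;
  if (name.find("$inline$") != std::string::npos) return Directive::kForceInline;
  return Directive::kNone;
}

int main() {
  assert(DirectiveFromName("int Foo.$noinline$bar()") == Directive::kForbidInline);
  assert(DirectiveFromName("int Foo.$inline$baz()") == Directive::kForceInline);
  return 0;
}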
@@ -174,7 +175,7 @@ bool HInliner::Run() {
if (honor_noinline_directives) {
// Debugging case: directives in method names control or assert on inlining.
std::string callee_name = outer_compilation_unit_.GetDexFile()->PrettyMethod(
- call->GetDexMethodIndex(), /* with_signature */ false);
+ call->GetDexMethodIndex(), /* with_signature= */ false);
// Tests prevent inlining by having $noinline$ in their method names.
if (callee_name.find("$noinline$") == std::string::npos) {
if (TryInline(call)) {
@@ -406,7 +407,7 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
return single_impl;
}
-static bool IsMethodUnverified(CompilerDriver* const compiler_driver, ArtMethod* method)
+static bool IsMethodUnverified(const CompilerOptions& compiler_options, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (!method->GetDeclaringClass()->IsVerified()) {
if (Runtime::Current()->UseJitCompilation()) {
@@ -415,8 +416,9 @@ static bool IsMethodUnverified(CompilerDriver* const compiler_driver, ArtMethod*
return true;
}
uint16_t class_def_idx = method->GetDeclaringClass()->GetDexClassDefIndex();
- if (!compiler_driver->IsMethodVerifiedWithoutFailures(
- method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
+ if (!compiler_options.IsMethodVerifiedWithoutFailures(method->GetDexMethodIndex(),
+ class_def_idx,
+ *method->GetDexFile())) {
// Method has soft or hard failures, don't analyze.
return true;
}
@@ -424,11 +426,11 @@ static bool IsMethodUnverified(CompilerDriver* const compiler_driver, ArtMethod*
return false;
}
-static bool AlwaysThrows(CompilerDriver* const compiler_driver, ArtMethod* method)
+static bool AlwaysThrows(const CompilerOptions& compiler_options, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method != nullptr);
// Skip non-compilable and unverified methods.
- if (!method->IsCompilable() || IsMethodUnverified(compiler_driver, method)) {
+ if (!method->IsCompilable() || IsMethodUnverified(compiler_options, method)) {
return false;
}
// Skip native methods, methods with try blocks, and methods that are too large.
@@ -502,7 +504,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
bool result = TryInlineAndReplace(invoke_instruction,
actual_method,
ReferenceTypeInfo::CreateInvalid(),
- /* do_rtp */ true,
+ /* do_rtp= */ true,
cha_devirtualize);
if (result) {
// Successfully inlined.
@@ -516,7 +518,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
}
}
- } else if (!cha_devirtualize && AlwaysThrows(compiler_driver_, actual_method)) {
+ } else if (!cha_devirtualize && AlwaysThrows(codegen_->GetCompilerOptions(), actual_method)) {
// Set always throws property for non-inlined method call with single target
// (unless it was obtained through CHA, because that would imply we have
// to add the CHA dependency, which seems not worth it).
@@ -678,7 +680,7 @@ HInliner::InlineCacheType HInliner::GetInlineCacheAOT(
/*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Runtime::Current()->IsAotCompiler());
- const ProfileCompilationInfo* pci = compiler_driver_->GetProfileCompilationInfo();
+ const ProfileCompilationInfo* pci = codegen_->GetCompilerOptions().GetProfileCompilationInfo();
if (pci == nullptr) {
return kInlineCacheNoData;
}
@@ -856,9 +858,9 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
HBasicBlock* bb_cursor = invoke_instruction->GetBlock();
if (!TryInlineAndReplace(invoke_instruction,
resolved_method,
- ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true),
- /* do_rtp */ false,
- /* cha_devirtualize */ false)) {
+ ReferenceTypeInfo::Create(monomorphic_type, /* is_exact= */ true),
+ /* do_rtp= */ false,
+ /* cha_devirtualize= */ false)) {
return false;
}
@@ -869,7 +871,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
class_index,
monomorphic_type,
invoke_instruction,
- /* with_deoptimization */ true);
+ /* with_deoptimization= */ true);
// Run type propagation to get the guard typed, and eventually propagate the
// type of the receiver.
@@ -877,7 +879,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp_fixup.Run();
MaybeRecordStat(stats_, MethodCompilationStat::kInlinedMonomorphicCall);
@@ -947,7 +949,7 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
klass,
is_referrer,
invoke_instruction->GetDexPc(),
- /* needs_access_check */ false);
+ /* needs_access_check= */ false);
HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
load_class, codegen_, caller_compilation_unit_);
DCHECK(kind != HLoadClass::LoadKind::kInvalid)
@@ -1025,7 +1027,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
if (!class_index.IsValid() ||
!TryBuildAndInline(invoke_instruction,
method,
- ReferenceTypeInfo::Create(handle, /* is_exact */ true),
+ ReferenceTypeInfo::Create(handle, /* is_exact= */ true),
&return_replacement)) {
all_targets_inlined = false;
} else {
@@ -1077,7 +1079,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp_fixup.Run();
return true;
}
@@ -1148,14 +1150,14 @@ void HInliner::CreateDiamondPatternForPolymorphicInline(HInstruction* compare,
graph_->UpdateLoopAndTryInformationOfNewBlock(
- then, original_invoke_block, /* replace_if_back_edge */ false);
+ then, original_invoke_block, /* replace_if_back_edge= */ false);
graph_->UpdateLoopAndTryInformationOfNewBlock(
- otherwise, original_invoke_block, /* replace_if_back_edge */ false);
+ otherwise, original_invoke_block, /* replace_if_back_edge= */ false);
// In case the original invoke location was a back edge, we need to update
// the loop to now have the merge block as a back edge.
graph_->UpdateLoopAndTryInformationOfNewBlock(
- merge, original_invoke_block, /* replace_if_back_edge */ true);
+ merge, original_invoke_block, /* replace_if_back_edge= */ true);
}
bool HInliner::TryInlinePolymorphicCallToSameTarget(
@@ -1273,7 +1275,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp_fixup.Run();
MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall);
@@ -1296,9 +1298,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
// If invoke_instruction is devirtualized to a different method, give intrinsics
// another chance before we try to inline it.
- bool wrong_invoke_type = false;
- if (invoke_instruction->GetResolvedMethod() != method &&
- IntrinsicsRecognizer::Recognize(invoke_instruction, method, &wrong_invoke_type)) {
+ if (invoke_instruction->GetResolvedMethod() != method && method->IsIntrinsic()) {
MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
if (invoke_instruction->IsInvokeInterface()) {
// We don't intrinsify an invoke-interface directly.
@@ -1311,6 +1311,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
invoke_instruction->GetDexMethodIndex(), // Use interface method's dex method index.
method,
method->GetMethodIndex());
+ DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
HInputsRef inputs = invoke_instruction->GetInputs();
for (size_t index = 0; index != inputs.size(); ++index) {
new_invoke->SetArgumentAt(index, inputs[index]);
@@ -1320,14 +1321,11 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
if (invoke_instruction->GetType() == DataType::Type::kReference) {
new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
}
- // Run intrinsic recognizer again to set new_invoke's intrinsic.
- IntrinsicsRecognizer::Recognize(new_invoke, method, &wrong_invoke_type);
- DCHECK_NE(new_invoke->GetIntrinsic(), Intrinsics::kNone);
return_replacement = new_invoke;
// invoke_instruction is replaced with new_invoke.
should_remove_invoke_instruction = true;
} else {
- // invoke_instruction is intrinsified and stays.
+ invoke_instruction->SetResolvedMethod(method);
}
} else if (!TryBuildAndInline(invoke_instruction, method, receiver_type, &return_replacement)) {
if (invoke_instruction->IsInvokeInterface()) {
@@ -1401,7 +1399,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
outer_compilation_unit_.GetClassLoader(),
outer_compilation_unit_.GetDexCache(),
handles_,
- /* is_first_run */ false).Run();
+ /* is_first_run= */ false).Run();
}
return true;
}
@@ -1421,10 +1419,6 @@ size_t HInliner::CountRecursiveCallsOf(ArtMethod* method) const {
static inline bool MayInline(const CompilerOptions& compiler_options,
const DexFile& inlined_from,
const DexFile& inlined_into) {
- if (kIsTargetBuild) {
- return true;
- }
-
// We're not allowed to inline across dex files if we're the no-inline-from dex file.
if (!IsSameDexFile(inlined_from, inlined_into) &&
ContainsElement(compiler_options.GetNoInlineFromDexFile(), &inlined_from)) {
@@ -1506,7 +1500,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
return false;
}
- if (IsMethodUnverified(compiler_driver_, method)) {
+ if (IsMethodUnverified(codegen_->GetCompilerOptions(), method)) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
<< "Method " << method->PrettyMethod()
<< " couldn't be verified, so it cannot be inlined";
@@ -1631,7 +1625,8 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
[](uint16_t index) { return index != DexFile::kDexNoIndex16; }));
// Create HInstanceFieldSet for each IPUT that stores non-zero data.
- HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u);
+ HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction,
+ /* arg_vreg_index= */ 0u);
bool needs_constructor_barrier = false;
for (size_t i = 0; i != number_of_iputs; ++i) {
HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
@@ -1649,7 +1644,7 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
}
}
if (needs_constructor_barrier) {
- // See CompilerDriver::RequiresConstructorBarrier for more details.
+ // See DexCompilationUnit::RequiresConstructorBarrier for more details.
DCHECK(obj != nullptr) << "only non-static methods can have a constructor fence";
HConstructorFence* constructor_fence =
@@ -1673,7 +1668,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index,
REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtField* resolved_field =
- class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+ class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
DCHECK(resolved_field != nullptr);
HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet(
obj,
@@ -1686,7 +1681,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index,
*referrer->GetDexFile(),
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
- /* dex_pc */ 0);
+ /* dex_pc= */ 0);
if (iget->GetType() == DataType::Type::kReference) {
// Use the same dex_cache that we used for field lookup as the hint_dex_cache.
Handle<mirror::DexCache> dex_cache = handles_->NewHandle(referrer->GetDexCache());
@@ -1694,7 +1689,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index,
outer_compilation_unit_.GetClassLoader(),
dex_cache,
handles_,
- /* is_first_run */ false);
+ /* is_first_run= */ false);
rtp.Visit(iget);
}
return iget;
@@ -1708,7 +1703,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index,
REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtField* resolved_field =
- class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false);
+ class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false);
DCHECK(resolved_field != nullptr);
if (is_final != nullptr) {
// This information is needed only for constructors.
@@ -1727,7 +1722,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index,
*referrer->GetDexFile(),
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
- /* dex_pc */ 0);
+ /* dex_pc= */ 0);
return iput;
}
@@ -1739,6 +1734,21 @@ static inline Handle<T> NewHandleIfDifferent(T* object,
return (object != hint.Get()) ? handles->NewHandle(object) : hint;
}
+static bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!Runtime::Current()->IsAotCompiler()) {
+ // JIT can always encode methods in stack maps.
+ return true;
+ }
+ if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
+ return true;
+ }
+ // TODO(ngeoffray): Support more AOT cases for inlining:
+ // - methods in multidex
+ // - methods in boot image for on-device non-PIC compilation.
+ return false;
+}
+
bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
ReferenceTypeInfo receiver_type,
@@ -1746,7 +1756,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
HInstruction** return_replacement) {
DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
ScopedObjectAccess soa(Thread::Current());
- const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
+ const dex::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
uint32_t method_index = resolved_method->GetDexMethodIndex();
CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
@@ -1759,6 +1769,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
caller_compilation_unit_.GetClassLoader(),
handles_);
+ Handle<mirror::Class> compiling_class = handles_->NewHandle(resolved_method->GetDeclaringClass());
DexCompilationUnit dex_compilation_unit(
class_loader,
class_linker,
@@ -1767,8 +1778,9 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
method_index,
resolved_method->GetAccessFlags(),
- /* verified_method */ nullptr,
- dex_cache);
+ /* verified_method= */ nullptr,
+ dex_cache,
+ compiling_class);
InvokeType invoke_type = invoke_instruction->GetInvokeType();
if (invoke_type == kInterface) {
@@ -1777,6 +1789,14 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
invoke_type = kVirtual;
}
+ bool caller_dead_reference_safe = graph_->IsDeadReferenceSafe();
+ const dex::ClassDef& callee_class = resolved_method->GetClassDef();
+ // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
+ // is currently rarely true.
+ bool callee_dead_reference_safe =
+ annotations::HasDeadReferenceSafeAnnotation(callee_dex_file, callee_class)
+ && !annotations::MethodContainsRSensitiveAccess(callee_dex_file, callee_class, method_index);
+
const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
graph_->GetAllocator(),
@@ -1785,8 +1805,9 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
method_index,
codegen_->GetCompilerOptions().GetInstructionSet(),
invoke_type,
+ callee_dead_reference_safe,
graph_->IsDebuggable(),
- /* osr */ false,
+ /* osr= */ false,
caller_instruction_counter);
callee_graph->SetArtMethod(resolved_method);
@@ -1807,7 +1828,6 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
code_item_accessor,
&dex_compilation_unit,
&outer_compilation_unit_,
- compiler_driver_,
codegen_,
inline_stats_,
resolved_method->GetQuickenedInfo(),
@@ -1868,7 +1888,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
outer_compilation_unit_.GetClassLoader(),
dex_compilation_unit.GetDexCache(),
handles_,
- /* is_first_run */ false).Run();
+ /* is_first_run= */ false).Run();
}
RunOptimizations(callee_graph, code_item, dex_compilation_unit);
@@ -2012,23 +2032,26 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
inline_stats_->AddTo(stats_);
}
+ if (caller_dead_reference_safe && !callee_dead_reference_safe) {
+ // Caller was dead reference safe, but is not anymore, since we inlined dead
+ // reference unsafe code. Prior transformations remain valid, since they did not
+ // affect the inlined code.
+ graph_->MarkDeadReferenceUnsafe();
+ }
+
return true;
}
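The two dead-reference-safety hunks above combine into one rule: the caller graph keeps the property only while every inlined callee has it too. A minimal sketch of that propagation, under the assumption that the flag is a plain boolean (the real code calls graph_->MarkDeadReferenceUnsafe() on an HGraph):

#include <cassert>

// Sketch only: stand-in for HGraph's dead-reference-safe flag.
struct GraphFlags {
  bool dead_reference_safe;
};

static void PropagateOnInline(GraphFlags& caller, const GraphFlags& callee) {
  if (caller.dead_reference_safe && !callee.dead_reference_safe) {
    caller.dead_reference_safe = false;  // mirrors MarkDeadReferenceUnsafe() above
  }
}

int main() {
  GraphFlags caller{true};
  PropagateOnInline(caller, GraphFlags{false});
  assert(!caller.dead_reference_safe);  // inlining unsafe code poisons the caller
  return 0;
}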
void HInliner::RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit) {
// Note: if the outermost_graph_ is being compiled OSR, we should not run any
// optimization that could lead to a HDeoptimize. The following optimizations do not.
HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
HConstantFolding fold(callee_graph, "constant_folding$inliner");
- HSharpening sharpening(callee_graph, codegen_);
InstructionSimplifier simplify(callee_graph, codegen_, inline_stats_);
- IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_);
HOptimization* optimizations[] = {
- &intrinsics,
- &sharpening,
&simplify,
&fold,
&dce,
@@ -2063,7 +2086,6 @@ void HInliner::RunOptimizations(HGraph* callee_graph,
codegen_,
outer_compilation_unit_,
dex_compilation_unit,
- compiler_driver_,
handles_,
inline_stats_,
total_number_of_dex_registers_ + accessor.RegistersSize(),
@@ -2097,7 +2119,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
// is more specific than the class which declares the method.
if (!resolved_method->IsStatic()) {
if (IsReferenceTypeRefinement(GetClassRTI(resolved_method->GetDeclaringClass()),
- /* declared_can_be_null */ false,
+ /* declared_can_be_null= */ false,
invoke_instruction->InputAt(0u))) {
return true;
}
@@ -2106,7 +2128,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
// Iterate over the list of parameter types and test whether any of the
// actual inputs has a more specific reference type than the type declared in
// the signature.
- const DexFile::TypeList* param_list = resolved_method->GetParameterTypeList();
+ const dex::TypeList* param_list = resolved_method->GetParameterTypeList();
for (size_t param_idx = 0,
input_idx = resolved_method->IsStatic() ? 0 : 1,
e = (param_list == nullptr ? 0 : param_list->Size());
@@ -2117,7 +2139,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod*
ObjPtr<mirror::Class> param_cls = resolved_method->LookupResolvedClassFromTypeIndex(
param_list->GetTypeItem(param_idx).type_idx_);
if (IsReferenceTypeRefinement(GetClassRTI(param_cls),
- /* declared_can_be_null */ true,
+ /* declared_can_be_null= */ true,
input)) {
return true;
}
@@ -2134,7 +2156,7 @@ bool HInliner::ReturnTypeMoreSpecific(HInvoke* invoke_instruction,
if (return_replacement->GetType() == DataType::Type::kReference) {
// Test if the return type is a refinement of the declared return type.
if (IsReferenceTypeRefinement(invoke_instruction->GetReferenceTypeInfo(),
- /* declared_can_be_null */ true,
+ /* declared_can_be_null= */ true,
return_replacement)) {
return true;
} else if (return_replacement->IsInstanceFieldGet()) {
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 6fd0c204b2..efd4c74079 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -38,7 +38,6 @@ class HInliner : public HOptimization {
CodeGenerator* codegen,
const DexCompilationUnit& outer_compilation_unit,
const DexCompilationUnit& caller_compilation_unit,
- CompilerDriver* compiler_driver,
VariableSizedHandleScope* handles,
OptimizingCompilerStats* stats,
size_t total_number_of_dex_registers,
@@ -51,7 +50,6 @@ class HInliner : public HOptimization {
outer_compilation_unit_(outer_compilation_unit),
caller_compilation_unit_(caller_compilation_unit),
codegen_(codegen),
- compiler_driver_(compiler_driver),
total_number_of_dex_registers_(total_number_of_dex_registers),
total_number_of_instructions_(total_number_of_instructions),
parent_(parent),
@@ -101,7 +99,7 @@ class HInliner : public HOptimization {
// Run simple optimizations on `callee_graph`.
void RunOptimizations(HGraph* callee_graph,
- const DexFile::CodeItem* code_item,
+ const dex::CodeItem* code_item,
const DexCompilationUnit& dex_compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -280,7 +278,6 @@ class HInliner : public HOptimization {
const DexCompilationUnit& outer_compilation_unit_;
const DexCompilationUnit& caller_compilation_unit_;
CodeGenerator* const codegen_;
- CompilerDriver* const compiler_driver_;
const size_t total_number_of_dex_registers_;
size_t total_number_of_instructions_;
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e555d0d890..5e7b57523f 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -19,12 +19,13 @@
#include "art_method-inl.h"
#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
+#include "base/logging.h"
#include "block_builder.h"
-#include "class_linker.h"
+#include "class_linker-inl.h"
+#include "code_generator.h"
#include "data_type-inl.h"
#include "dex/bytecode_utils.h"
#include "dex/dex_instruction-inl.h"
-#include "driver/compiler_driver-inl.h"
#include "driver/dex_compilation_unit.h"
#include "driver/compiler_options.h"
#include "imtable-inl.h"
@@ -47,7 +48,6 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
DataType::Type return_type,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
@@ -61,7 +61,6 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
return_type_(return_type),
block_builder_(block_builder),
ssa_builder_(ssa_builder),
- compiler_driver_(compiler_driver),
code_generator_(code_generator),
dex_compilation_unit_(dex_compilation_unit),
outer_compilation_unit_(outer_compilation_unit),
@@ -73,7 +72,8 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
current_locals_(nullptr),
latest_result_(nullptr),
current_this_parameter_(nullptr),
- loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
+ loop_headers_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ class_cache_(std::less<dex::TypeIndex>(), local_allocator->Adapter(kArenaAllocGraphBuilder)) {
loop_headers_.reserve(kDefaultNumberOfLoops);
}
@@ -319,8 +319,8 @@ bool HInstructionBuilder::Build() {
// Find locations where we want to generate extra stackmaps for native debugging.
// This allows us to generate the info only at interesting points (for example,
// at start of java statement) rather than before every dex instruction.
- const bool native_debuggable = compiler_driver_ != nullptr &&
- compiler_driver_->GetCompilerOptions().GetNativeDebuggable();
+ const bool native_debuggable = code_generator_ != nullptr &&
+ code_generator_->GetCompilerOptions().GetNativeDebuggable();
ArenaBitVector* native_debug_info_locations = nullptr;
if (native_debuggable) {
native_debug_info_locations = FindNativeDebugInfoLocations();
@@ -434,7 +434,7 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) {
HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- /* method_load_data */ 0u
+ /* method_load_data= */ 0u
};
InvokeType invoke_type = dex_compilation_unit_->IsStatic() ? kStatic : kDirect;
HInvokeStaticOrDirect* invoke = new (allocator_) HInvokeStaticOrDirect(
@@ -449,7 +449,7 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) {
target_method,
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
RangeInstructionOperands operands(graph_->GetNumberOfVRegs() - in_vregs, in_vregs);
- HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved */ false);
+ HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved= */ false);
// Add the return instruction.
if (return_type_ == DataType::Type::kVoid) {
@@ -466,22 +466,17 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) {
}
ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
- // The callback gets called when the line number changes.
- // In other words, it marks the start of new java statement.
- struct Callback {
- static bool Position(void* ctx, const DexFile::PositionInfo& entry) {
- static_cast<ArenaBitVector*>(ctx)->SetBit(entry.address_);
- return false;
- }
- };
ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_,
code_item_accessor_.InsnsSizeInCodeUnits(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocGraphBuilder);
locations->ClearAllBits();
- dex_file_->DecodeDebugPositionInfo(code_item_accessor_.DebugInfoOffset(),
- Callback::Position,
- locations);
+ // The visitor gets called when the line number changes.
+ // In other words, it marks the start of a new Java statement.
+ code_item_accessor_.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
+ locations->SetBit(entry.address_);
+ return false;
+ });
// Instruction-specific tweaks.
for (const DexInstructionPcPair& inst : code_item_accessor_) {
switch (inst->Opcode()) {
@@ -564,7 +559,7 @@ void HInstructionBuilder::InitializeParameters() {
uint16_t locals_index = graph_->GetNumberOfLocalVRegs();
uint16_t parameter_index = 0;
- const DexFile::MethodId& referrer_method_id =
+ const dex::MethodId& referrer_method_id =
dex_file_->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
if (!dex_compilation_unit_->IsStatic()) {
// Add the implicit 'this' argument, not expressed in the signature.
@@ -572,7 +567,7 @@ void HInstructionBuilder::InitializeParameters() {
referrer_method_id.class_idx_,
parameter_index++,
DataType::Type::kReference,
- /* is_this */ true);
+ /* is_this= */ true);
AppendInstruction(parameter);
UpdateLocal(locals_index++, parameter);
number_of_parameters--;
@@ -581,15 +576,15 @@ void HInstructionBuilder::InitializeParameters() {
DCHECK(current_this_parameter_ == nullptr);
}
- const DexFile::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
- const DexFile::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
+ const dex::ProtoId& proto = dex_file_->GetMethodPrototype(referrer_method_id);
+ const dex::TypeList* arg_types = dex_file_->GetProtoParameters(proto);
for (int i = 0, shorty_pos = 1; i < number_of_parameters; i++) {
HParameterValue* parameter = new (allocator_) HParameterValue(
*dex_file_,
arg_types->GetTypeItem(shorty_pos - 1).type_idx_,
parameter_index++,
DataType::FromShorty(shorty[shorty_pos]),
- /* is_this */ false);
+ /* is_this= */ false);
++shorty_pos;
AppendInstruction(parameter);
// Store the parameter value in the local that the dex code will use
@@ -714,20 +709,18 @@ void HInstructionBuilder::Binop_22b(const Instruction& instruction, bool reverse
// Does the method being compiled need any constructor barriers being inserted?
// (Always 'false' for methods that aren't <init>.)
-static bool RequiresConstructorBarrier(const DexCompilationUnit* cu, CompilerDriver* driver) {
+static bool RequiresConstructorBarrier(const DexCompilationUnit* cu) {
// Can be null in unit tests only.
if (UNLIKELY(cu == nullptr)) {
return false;
}
- Thread* self = Thread::Current();
- return cu->IsConstructor()
- && !cu->IsStatic()
- // RequiresConstructorBarrier must only be queried for <init> methods;
- // it's effectively "false" for every other method.
- //
- // See CompilerDriver::RequiresConstructBarrier for more explanation.
- && driver->RequiresConstructorBarrier(self, cu->GetDexFile(), cu->GetClassDefIndex());
+ // Constructor barriers are applicable only for <init> methods.
+ if (LIKELY(!cu->IsConstructor() || cu->IsStatic())) {
+ return false;
+ }
+
+ return cu->RequiresConstructorBarrier();
}
// Returns true if `block` has only one successor which starts at the next
@@ -773,7 +766,7 @@ void HInstructionBuilder::BuildReturn(const Instruction& instruction,
// Only <init> (which is a return-void) could possibly have a constructor fence.
// This may insert additional redundant constructor fences from the super constructors.
// TODO: remove redundant constructor fences (b/36656456).
- if (RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_)) {
+ if (RequiresConstructorBarrier(dex_compilation_unit_)) {
// Compiling instance constructor.
DCHECK_STREQ("<init>", graph_->GetMethodName());
@@ -787,7 +780,7 @@ void HInstructionBuilder::BuildReturn(const Instruction& instruction,
}
AppendInstruction(new (allocator_) HReturnVoid(dex_pc));
} else {
- DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_, compiler_driver_));
+ DCHECK(!RequiresConstructorBarrier(dex_compilation_unit_));
HInstruction* value = LoadLocal(instruction.VRegA(), type);
AppendInstruction(new (allocator_) HReturn(value, dex_pc));
}
@@ -854,7 +847,7 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
// make this an invoke-unresolved to handle cross-dex invokes or abstract super methods, both of
// which require runtime handling.
if (invoke_type == kSuper) {
- ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
+ ObjPtr<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass().Get();
if (compiling_class == nullptr) {
// We could not determine the method's class we need to wait until runtime.
DCHECK(Runtime::Current()->IsAotCompiler());
@@ -884,8 +877,8 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
// The back-end code generator relies on this check in order to ensure that it will not
// attempt to read the dex_cache with a dex_method_index that is not from the correct
// dex_file. If we didn't do this check then the dex_method_index will not be updated in the
- // builder, which means that the code-generator (and compiler driver during sharpening and
- // inliner, maybe) might invoke an incorrect method.
+ // builder, which means that the code-generator (and sharpening and inliner, maybe)
+ // might invoke an incorrect method.
// TODO: The actual method could still be referenced in the current dex file, so we
// could try locating it.
// TODO: Remove the dex_file restriction.
@@ -933,7 +926,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
dex_pc,
method_idx,
invoke_type);
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ true);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ true);
}
// Replace calls to String.<init> with StringFactory.
@@ -952,10 +945,10 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect(
allocator_,
number_of_arguments - 1,
- DataType::Type::kReference /*return_type */,
+ /* return_type= */ DataType::Type::kReference,
dex_pc,
method_idx,
- nullptr /* resolved_method */,
+ /* resolved_method= */ nullptr,
dispatch_info,
invoke_type,
target_method,
@@ -974,7 +967,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
if (invoke_type == kStatic) {
clinit_check =
- ProcessClinitCheckForInvoke(soa, dex_pc, resolved_method, &clinit_check_requirement);
+ ProcessClinitCheckForInvoke(dex_pc, resolved_method, &clinit_check_requirement);
} else if (invoke_type == kSuper) {
if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
// Update the method index to the one resolved. Note that this may be a no-op if
@@ -983,11 +976,8 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
}
}
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
- HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall,
- HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
- 0u
- };
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info =
+ HSharpening::SharpenInvokeStaticOrDirect(resolved_method, code_generator_);
MethodReference target_method(resolved_method->GetDexFile(),
resolved_method->GetDexMethodIndex());
invoke = new (allocator_) HInvokeStaticOrDirect(allocator_,
@@ -1020,7 +1010,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
resolved_method,
ImTable::GetImtIndex(resolved_method));
}
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false, clinit_check);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false, clinit_check);
}
bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
@@ -1036,7 +1026,7 @@ bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
return_type,
dex_pc,
method_idx);
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
@@ -1052,7 +1042,7 @@ bool HInstructionBuilder::BuildInvokeCustom(uint32_t dex_pc,
call_site_idx,
return_type,
dex_pc);
- return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false);
}
HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
@@ -1063,7 +1053,7 @@ HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, u
HInstruction* cls = load_class;
Handle<mirror::Class> klass = load_class->GetClass();
- if (!IsInitialized(soa, klass)) {
+ if (!IsInitialized(klass)) {
cls = new (allocator_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
@@ -1292,7 +1282,7 @@ static bool HasTrivialInitialization(ObjPtr<mirror::Class> cls,
return true;
}
-bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const {
+bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
if (cls == nullptr) {
return false;
}
@@ -1307,37 +1297,33 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
}
// Assume loaded only if klass is in the boot image. App classes cannot be assumed
// loaded because we don't even know what class loader will be used to load them.
- if (IsInBootImage(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+ if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
return true;
}
}
- // We can avoid the class initialization check for `cls` in static methods in the
- // very same class. Instance methods of the same class can run on an escaped instance
+ // We can avoid the class initialization check for `cls` in static methods and constructors
+ // in the very same class; invoking a static method involves a class initialization check
+ // and so does the instance allocation that must be executed before invoking a constructor.
+ // Other instance methods of the same class can run on an escaped instance
// of an erroneous class. Even a superclass may need to be checked as the subclass
// can be completely initialized while the superclass is initializing and the subclass
// remains initialized when the superclass initializer throws afterwards. b/62478025
// Note: The HClinitCheck+HInvokeStaticOrDirect merging can still apply.
- ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
- bool is_static = (dex_compilation_unit_->GetAccessFlags() & kAccStatic) != 0u;
- if (is_static && outermost_cls == cls.Get()) {
+ auto is_static_method_or_constructor_of_cls = [cls](const DexCompilationUnit& compilation_unit)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return (compilation_unit.GetAccessFlags() & (kAccStatic | kAccConstructor)) != 0u &&
+ compilation_unit.GetCompilingClass().Get() == cls.Get();
+ };
+ if (is_static_method_or_constructor_of_cls(*outer_compilation_unit_) ||
+ // Check also the innermost method. Though excessive copies of ClinitCheck can be
+ // eliminated by GVN, that happens only after the decision whether to inline the
+ // graph or not and that may depend on the presence of the ClinitCheck.
+ // TODO: We should walk over the entire inlined method chain, but we don't pass that
+ // information to the builder.
+ is_static_method_or_constructor_of_cls(*dex_compilation_unit_)) {
return true;
}
- // Remember if the compiled class is a subclass of `cls`. By the time this is used
- // below the `outermost_cls` may be invalidated by calling ResolveCompilingClass().
- bool is_subclass = IsSubClass(outermost_cls, cls.Get());
- if (dex_compilation_unit_ != outer_compilation_unit_) {
- // Check also the innermost method. Though excessive copies of ClinitCheck can be
- // eliminated by GVN, that happens only after the decision whether to inline the
- // graph or not and that may depend on the presence of the ClinitCheck.
- // TODO: We should walk over the entire inlined method chain, but we don't pass that
- // information to the builder.
- ObjPtr<mirror::Class> innermost_cls = ResolveCompilingClass(soa);
- if (is_static && innermost_cls == cls.Get()) {
- return true;
- }
- is_subclass = is_subclass || IsSubClass(innermost_cls, cls.Get());
- }
// Otherwise, we may be able to avoid the check if `cls` is a superclass of a method being
// compiled here (anywhere in the inlining chain) as the `cls` must have started initializing
@@ -1358,7 +1344,12 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
// TODO: We should walk over the entire inlined methods chain, but we don't pass that
// information to the builder. (We could also check if we're guaranteed a non-null instance
// of `cls` at this location but that's outside the scope of the instruction builder.)
- if (is_subclass && HasTrivialInitialization(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+ bool is_subclass = IsSubClass(outer_compilation_unit_->GetCompilingClass().Get(), cls.Get());
+ if (dex_compilation_unit_ != outer_compilation_unit_) {
+ is_subclass = is_subclass ||
+ IsSubClass(dex_compilation_unit_->GetCompilingClass().Get(), cls.Get());
+ }
+ if (is_subclass && HasTrivialInitialization(cls.Get(), code_generator_->GetCompilerOptions())) {
return true;
}
@@ -1366,22 +1357,20 @@ bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::
}
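The rewritten IsInitialized() shortcut above boils down to an access-flag test against the compiling class. A simplified, self-contained sketch of that test (the flag values follow the dex specification; everything else is a stand-in, not ART code):

#include <cassert>
#include <cstdint>

// Simplified sketch: a <clinit> check for `cls` can be skipped when the compiled
// method is a static method or a constructor declared by `cls` itself, because
// invoking it already implies `cls` is initializing or initialized.
constexpr uint32_t kAccStatic = 0x0008;        // dex ACC_STATIC
constexpr uint32_t kAccConstructor = 0x00010000;  // dex ACC_CONSTRUCTOR

static bool CanSkipClinitCheck(uint32_t method_access_flags,
                               const void* declaring_class,
                               const void* cls) {
  return (method_access_flags & (kAccStatic | kAccConstructor)) != 0u &&
         declaring_class == cls;
}

int main() {
  int klass = 0;  // stand-in for a mirror::Class
  assert(CanSkipClinitCheck(kAccStatic, &klass, &klass));
  assert(!CanSkipClinitCheck(/* plain instance method */ 0u, &klass, &klass));
  return 0;
}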
HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
- ScopedObjectAccess& soa,
uint32_t dex_pc,
ArtMethod* resolved_method,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
Handle<mirror::Class> klass = handles_->NewHandle(resolved_method->GetDeclaringClass());
HClinitCheck* clinit_check = nullptr;
- if (IsInitialized(soa, klass)) {
+ if (IsInitialized(klass)) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
} else {
- HLoadClass* cls = BuildLoadClass(soa,
- klass->GetDexTypeIndex(),
+ HLoadClass* cls = BuildLoadClass(klass->GetDexTypeIndex(),
klass->GetDexFile(),
klass,
dex_pc,
- /* needs_access_check */ false);
+ /* needs_access_check= */ false);
if (cls != nullptr) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
clinit_check = new (allocator_) HClinitCheck(cls, dex_pc);
@@ -1505,27 +1494,28 @@ bool HInstructionBuilder::HandleStringInit(HInvoke* invoke,
// to be visited once it is clear whether it has remaining uses.
if (arg_this->IsNewInstance()) {
ssa_builder_->AddUninitializedString(arg_this->AsNewInstance());
- // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
- for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
- if ((*current_locals_)[vreg] == arg_this) {
- (*current_locals_)[vreg] = invoke;
- }
- }
} else {
DCHECK(arg_this->IsPhi());
// We can get a phi as input of a String.<init> if there is a loop between the
// allocation and the String.<init> call. As we don't know which other phis might alias
- // with `arg_this`, we keep a record of these phis and will analyze their inputs and
- // uses once the inputs and users are populated (in ssa_builder.cc).
- // Note: we only do this for phis, as it is a somewhat more expensive operation than
- // what we're doing above when the input is the `HNewInstance`.
- ssa_builder_->AddUninitializedStringPhi(arg_this->AsPhi(), invoke);
+ // with `arg_this`, we keep a record of those invocations so we can later replace
+ // the allocation with the invocation.
+ // Add the actual 'this' input so the analysis knows what the allocation instruction is.
+ // The input will be removed during the analysis.
+ invoke->AddInput(arg_this);
+ ssa_builder_->AddUninitializedStringPhi(invoke);
+ }
+ // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
+ for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
+ if ((*current_locals_)[vreg] == arg_this) {
+ (*current_locals_)[vreg] = invoke;
+ }
}
return true;
}
static DataType::Type GetFieldAccessType(const DexFile& dex_file, uint16_t field_index) {
- const DexFile::FieldId& field_id = dex_file.GetFieldId(field_index);
+ const dex::FieldId& field_id = dex_file.GetFieldId(field_index);
const char* type = dex_file.GetFieldTypeDescriptor(field_id);
return DataType::FromShorty(type[0]);
}
@@ -1549,7 +1539,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
}
ScopedObjectAccess soa(Thread::Current());
- ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put);
+ ArtField* resolved_field = ResolveField(field_index, /* is_static= */ false, is_put);
// Generate an explicit null check on the reference, unless the field access
// is unresolved. In that case, we rely on the runtime to perform various
@@ -1612,43 +1602,6 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
return true;
}
-static ObjPtr<mirror::Class> ResolveClassFrom(ScopedObjectAccess& soa,
- CompilerDriver* driver,
- const DexCompilationUnit& compilation_unit)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
- Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
-
- return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
-}
-
-ObjPtr<mirror::Class> HInstructionBuilder::ResolveOutermostCompilingClass(
- ScopedObjectAccess& soa) const {
- return ResolveClassFrom(soa, compiler_driver_, *outer_compilation_unit_);
-}
-
-ObjPtr<mirror::Class> HInstructionBuilder::ResolveCompilingClass(ScopedObjectAccess& soa) const {
- return ResolveClassFrom(soa, compiler_driver_, *dex_compilation_unit_);
-}
-
-bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache = dex_compilation_unit_->GetDexCache();
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
- soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
- Handle<mirror::Class> outer_class(hs.NewHandle(ResolveOutermostCompilingClass(soa)));
-
- // GetOutermostCompilingClass returns null when the class is unresolved
- // (e.g. if it derives from an unresolved class). This is bogus knowing that
- // we are compiling it.
- // When this happens we cannot establish a direct relation between the current
- // class and the outer class, so we return false.
- // (Note that this is only used for optimizing invokes and field accesses)
- return (cls != nullptr) && (outer_class.Get() == cls.Get());
-}
-
void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
uint32_t dex_pc,
bool is_put,
@@ -1668,18 +1621,17 @@ void HInstructionBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& in
ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static, bool is_put) {
ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<2> hs(soa.Self());
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- Handle<mirror::Class> compiling_class(hs.NewHandle(ResolveCompilingClass(soa)));
ArtField* resolved_field = class_linker->ResolveField(field_idx,
dex_compilation_unit_->GetDexCache(),
class_loader,
is_static);
+ DCHECK_EQ(resolved_field == nullptr, soa.Self()->IsExceptionPending());
if (UNLIKELY(resolved_field == nullptr)) {
- // Clean up any exception left by type resolution.
+ // Clean up any exception left by field resolution.
soa.Self()->ClearException();
return nullptr;
}
@@ -1691,6 +1643,7 @@ ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static,
}
// Check access.
+ Handle<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass();
if (compiling_class == nullptr) {
if (!resolved_field->IsPublic()) {
return nullptr;
@@ -1720,7 +1673,7 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint16_t field_index = instruction.VRegB_21c();
ScopedObjectAccess soa(Thread::Current());
- ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put);
+ ArtField* resolved_field = ResolveField(field_index, /* is_static= */ true, is_put);
if (resolved_field == nullptr) {
MaybeRecordStat(compilation_stats_,
@@ -1733,12 +1686,11 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
DataType::Type field_type = GetFieldAccessType(*dex_file_, field_index);
Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass());
- HLoadClass* constant = BuildLoadClass(soa,
- klass->GetDexTypeIndex(),
+ HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(),
klass->GetDexFile(),
klass,
dex_pc,
- /* needs_access_check */ false);
+ /* needs_access_check= */ false);
if (constant == nullptr) {
// The class cannot be referenced from this compiled code. Generate
@@ -1750,7 +1702,7 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
HInstruction* cls = constant;
- if (!IsInitialized(soa, klass)) {
+ if (!IsInitialized(klass)) {
cls = new (allocator_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
}
@@ -1849,15 +1801,27 @@ void HInstructionBuilder::BuildArrayAccess(const Instruction& instruction,
graph_->SetHasBoundsChecks(true);
}
+HNewArray* HInstructionBuilder::BuildNewArray(uint32_t dex_pc,
+ dex::TypeIndex type_index,
+ HInstruction* length) {
+ HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+
+ const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(type_index));
+ DCHECK_EQ(descriptor[0], '[');
+ size_t component_type_shift = Primitive::ComponentSizeShift(Primitive::GetType(descriptor[1]));
+
+ HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc, component_type_shift);
+ AppendInstruction(new_array);
+ return new_array;
+}
+
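BuildNewArray above derives the component size shift from the second character of the array descriptor (e.g. "[I"). A simplified sketch of that mapping, assuming the usual dex primitive sizes (the reference case is an assumption here; the real code goes through Primitive::ComponentSizeShift):

#include <cassert>
#include <cstddef>

// Simplified mapping (not the ART Primitive helpers) from the element character
// of an array descriptor such as "[I" to log2 of the element size in bytes.
static size_t ComponentSizeShift(char element) {
  switch (element) {
    case 'Z': case 'B': return 0;  // boolean, byte -> 1 byte
    case 'C': case 'S': return 1;  // char, short   -> 2 bytes
    case 'I': case 'F': return 2;  // int, float    -> 4 bytes
    case 'J': case 'D': return 3;  // long, double  -> 8 bytes
    default:            return 2;  // 'L'/'[': assuming 4-byte heap references
  }
}

int main() {
  assert(ComponentSizeShift('I') == 2);  // new int[n]  uses descriptor "[I"
  assert(ComponentSizeShift('J') == 3);  // new long[n] uses descriptor "[J"
  return 0;
}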
HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
const InstructionOperands& operands) {
const size_t number_of_operands = operands.GetNumberOfOperands();
HInstruction* length = graph_->GetIntConstant(number_of_operands, dex_pc);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
- HNewArray* const object = new (allocator_) HNewArray(cls, length, dex_pc);
- AppendInstruction(object);
+ HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
const char* descriptor = dex_file_->StringByTypeIdx(type_index);
DCHECK_EQ(descriptor[0], '[') << descriptor;
char primitive = descriptor[1];
@@ -1870,13 +1834,13 @@ HNewArray* HInstructionBuilder::BuildFilledNewArray(uint32_t dex_pc,
for (size_t i = 0; i < number_of_operands; ++i) {
HInstruction* value = LoadLocal(operands.GetOperand(i), type);
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
- HArraySet* aset = new (allocator_) HArraySet(object, index, value, type, dex_pc);
+ HArraySet* aset = new (allocator_) HArraySet(new_array, index, value, type, dex_pc);
ssa_builder_->MaybeAddAmbiguousArraySet(aset);
AppendInstruction(aset);
}
- latest_result_ = object;
+ latest_result_ = new_array;
- return object;
+ return new_array;
}
template <typename T>
@@ -1979,12 +1943,11 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint3
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::Class> klass = ResolveClass(soa, type_index);
- bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
- return BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
+ bool needs_access_check = LoadClassNeedsAccessCheck(klass);
+ return BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
}
-HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
- dex::TypeIndex type_index,
+HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
const DexFile& dex_file,
Handle<mirror::Class> klass,
uint32_t dex_pc,
@@ -2001,11 +1964,8 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
}
// Note: `klass` must be from `handles_`.
- bool is_referrers_class = false;
- if (klass != nullptr) {
- ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
- is_referrers_class = (outermost_cls == klass.Get());
- }
+ bool is_referrers_class =
+ (klass != nullptr) && (outer_compilation_unit_->GetCompilingClass().Get() == klass.Get());
HLoadClass* load_class = new (allocator_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
@@ -2031,22 +1991,28 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
Handle<mirror::Class> HInstructionBuilder::ResolveClass(ScopedObjectAccess& soa,
dex::TypeIndex type_index) {
- Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- ObjPtr<mirror::Class> klass = compiler_driver_->ResolveClass(
- soa, dex_compilation_unit_->GetDexCache(), class_loader, type_index, dex_compilation_unit_);
- // TODO: Avoid creating excessive handles if the method references the same class repeatedly.
- // (Use a map on the local_allocator_.)
- return handles_->NewHandle(klass);
+ auto it = class_cache_.find(type_index);
+ if (it != class_cache_.end()) {
+ return it->second;
+ }
+
+ ObjPtr<mirror::Class> klass = dex_compilation_unit_->GetClassLinker()->ResolveType(
+ type_index, dex_compilation_unit_->GetDexCache(), dex_compilation_unit_->GetClassLoader());
+ DCHECK_EQ(klass == nullptr, soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException(); // Clean up the exception left by type resolution if any.
+
+ Handle<mirror::Class> h_klass = handles_->NewHandle(klass);
+ class_cache_.Put(type_index, h_klass);
+ return h_klass;
}
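The new class_cache_ turns repeated resolutions of the same type index into a single lookup plus one handle. A sketch of the same memoization pattern with standard containers (stand-in types; the real cache is an arena map keyed by dex::TypeIndex and stores Handle<mirror::Class>):

#include <cstdint>
#include <map>

// Stand-in for a resolved class.
struct FakeClass {};

class CachingResolver {
 public:
  const FakeClass* Resolve(uint32_t type_index) {
    auto it = cache_.find(type_index);
    if (it != cache_.end()) {
      return it->second;  // hit: no second resolution, no extra handle
    }
    const FakeClass* klass = SlowResolve(type_index);  // may be null on failure
    cache_.emplace(type_index, klass);                 // negative results are cached too
    return klass;
  }

 private:
  // Stand-in for ClassLinker::ResolveType(); always fails in this sketch.
  static const FakeClass* SlowResolve(uint32_t) { return nullptr; }

  std::map<uint32_t, const FakeClass*> cache_;
};

int main() {
  CachingResolver resolver;
  resolver.Resolve(7);  // resolves and caches
  resolver.Resolve(7);  // served from the cache
  return 0;
}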
-bool HInstructionBuilder::LoadClassNeedsAccessCheck(ScopedObjectAccess& soa,
- Handle<mirror::Class> klass) {
+bool HInstructionBuilder::LoadClassNeedsAccessCheck(Handle<mirror::Class> klass) {
if (klass == nullptr) {
return true;
} else if (klass->IsPublic()) {
return false;
} else {
- ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
+ ObjPtr<mirror::Class> compiling_class = dex_compilation_unit_->GetCompilingClass().Get();
return compiling_class == nullptr || !compiling_class->CanAccess(klass.Get());
}
}
@@ -2075,7 +2041,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::Class> klass = ResolveClass(soa, type_index);
- bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
+ bool needs_access_check = LoadClassNeedsAccessCheck(klass);
TypeCheckKind check_kind = HSharpening::ComputeTypeCheckKind(
klass.Get(), code_generator_, needs_access_check);
@@ -2093,7 +2059,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
bitstring_path_to_root = graph_->GetIntConstant(static_cast<int32_t>(path_to_root), dex_pc);
bitstring_mask = graph_->GetIntConstant(static_cast<int32_t>(mask), dex_pc);
} else {
- class_or_null = BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
+ class_or_null = BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
}
DCHECK(class_or_null != nullptr);
@@ -2899,10 +2865,8 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::NEW_ARRAY: {
dex::TypeIndex type_index(instruction.VRegC_22c());
HInstruction* length = LoadLocal(instruction.VRegB_22c(), DataType::Type::kInt32);
- HLoadClass* cls = BuildLoadClass(type_index, dex_pc);
+ HNewArray* new_array = BuildNewArray(dex_pc, type_index, length);
- HNewArray* new_array = new (allocator_) HNewArray(cls, length, dex_pc);
- AppendInstruction(new_array);
UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
BuildConstructorFenceForAllocation(new_array);
break;
@@ -2982,7 +2946,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::IGET_CHAR_QUICK:
case Instruction::IGET_SHORT:
case Instruction::IGET_SHORT_QUICK: {
- if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ false, quicken_index)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ false, quicken_index)) {
return false;
}
break;
@@ -3002,7 +2966,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::IPUT_CHAR_QUICK:
case Instruction::IPUT_SHORT:
case Instruction::IPUT_SHORT_QUICK: {
- if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ true, quicken_index)) {
+ if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ true, quicken_index)) {
return false;
}
break;
@@ -3015,7 +2979,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::SGET_BYTE:
case Instruction::SGET_CHAR:
case Instruction::SGET_SHORT: {
- BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ false);
+ BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ false);
break;
}
@@ -3026,7 +2990,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
case Instruction::SPUT_BYTE:
case Instruction::SPUT_CHAR:
case Instruction::SPUT_SHORT: {
- BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ true);
+ BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ true);
break;
}
@@ -3179,7 +3143,7 @@ ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType(
ObjPtr<mirror::Class> HInstructionBuilder::LookupReferrerClass() const {
// TODO: Cache the result in a Handle<mirror::Class>.
- const DexFile::MethodId& method_id =
+ const dex::MethodId& method_id =
dex_compilation_unit_->GetDexFile()->GetMethodId(dex_compilation_unit_->GetDexMethodIndex());
return LookupResolvedType(method_id.class_idx_, *dex_compilation_unit_);
}
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index af1b86ca6f..d701445946 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -34,7 +34,6 @@ class ArenaBitVector;
class ArtField;
class ArtMethod;
class CodeGenerator;
-class CompilerDriver;
class DexCompilationUnit;
class HBasicBlockBuilder;
class Instruction;
@@ -59,7 +58,6 @@ class HInstructionBuilder : public ValueObject {
DataType::Type return_type,
const DexCompilationUnit* dex_compilation_unit,
const DexCompilationUnit* outer_compilation_unit,
- CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
@@ -179,6 +177,9 @@ class HInstructionBuilder : public ValueObject {
uint32_t call_site_idx,
const InstructionOperands& operands);
+ // Builds a new array node.
+ HNewArray* BuildNewArray(uint32_t dex_pc, dex::TypeIndex type_index, HInstruction* length);
+
// Builds a new array node and the instructions that fill it.
HNewArray* BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
@@ -219,8 +220,7 @@ class HInstructionBuilder : public ValueObject {
// Builds a `HLoadClass` loading the given `type_index`.
HLoadClass* BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc);
- HLoadClass* BuildLoadClass(ScopedObjectAccess& soa,
- dex::TypeIndex type_index,
+ HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
const DexFile& dex_file,
Handle<mirror::Class> klass,
uint32_t dex_pc,
@@ -230,7 +230,7 @@ class HInstructionBuilder : public ValueObject {
Handle<mirror::Class> ResolveClass(ScopedObjectAccess& soa, dex::TypeIndex type_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool LoadClassNeedsAccessCheck(ScopedObjectAccess& soa, Handle<mirror::Class> klass)
+ bool LoadClassNeedsAccessCheck(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
// Builds a `HLoadMethodHandle` loading the given `method_handle_index`.
@@ -239,17 +239,6 @@ class HInstructionBuilder : public ValueObject {
// Builds a `HLoadMethodType` loading the given `proto_index`.
void BuildLoadMethodType(dex::ProtoIndex proto_index, uint32_t dex_pc);
- // Returns the outer-most compiling method's class.
- ObjPtr<mirror::Class> ResolveOutermostCompilingClass(ScopedObjectAccess& soa) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Returns the class whose method is being compiled.
- ObjPtr<mirror::Class> ResolveCompilingClass(ScopedObjectAccess& soa) const
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Returns whether `type_index` points to the outer-most compiling method's class.
- bool IsOutermostCompilingClass(dex::TypeIndex type_index) const;
-
void PotentiallySimplifyFakeString(uint16_t original_dex_register,
uint32_t dex_pc,
HInvoke* invoke);
@@ -272,7 +261,6 @@ class HInstructionBuilder : public ValueObject {
void HandleStringInitResult(HInvokeStaticOrDirect* invoke);
HClinitCheck* ProcessClinitCheckForInvoke(
- ScopedObjectAccess& soa,
uint32_t dex_pc,
ArtMethod* method,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
@@ -286,7 +274,7 @@ class HInstructionBuilder : public ValueObject {
void BuildConstructorFenceForAllocation(HInstruction* allocation);
// Return whether the compiler can assume `cls` is initialized.
- bool IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const
+ bool IsInitialized(Handle<mirror::Class> cls) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Try to resolve a method using the class linker. Return null if a method could
@@ -317,8 +305,6 @@ class HInstructionBuilder : public ValueObject {
HBasicBlockBuilder* const block_builder_;
SsaBuilder* const ssa_builder_;
- CompilerDriver* const compiler_driver_;
-
CodeGenerator* const code_generator_;
// The compilation unit of the current method being compiled. Note that
@@ -348,6 +334,10 @@ class HInstructionBuilder : public ValueObject {
ScopedArenaVector<HBasicBlock*> loop_headers_;
+ // Cached resolved types for the current compilation unit's DexFile.
+ // Handle<>s reference entries in the `handles_`.
+ ScopedArenaSafeMap<dex::TypeIndex, Handle<mirror::Class>> class_cache_;
+
static constexpr int kDefaultNumberOfLoops = 2;
DISALLOW_COPY_AND_ASSIGN(HInstructionBuilder);
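
The new class_cache_ member memoizes resolved classes per dex::TypeIndex, so repeated loads of the same type within one method resolve it only once; as the comment says, the stored Handle<>s live in handles_. The cache lookup itself is not shown in this diff. A standalone illustration of the memoization pattern (plain C++ with stand-in types, not ART code):

    #include <cstdint>
    #include <cstdio>
    #include <map>

    using TypeIndex = uint32_t;                    // stand-in for dex::TypeIndex
    using ClassHandle = int;                       // stand-in for Handle<mirror::Class>

    static int g_slow_lookups = 0;

    ClassHandle ResolveSlow(TypeIndex idx) {       // stand-in for the ClassLinker path
      ++g_slow_lookups;
      return static_cast<ClassHandle>(idx) * 10;
    }

    std::map<TypeIndex, ClassHandle> class_cache;  // plays the role of class_cache_

    ClassHandle ResolveCached(TypeIndex idx) {
      auto it = class_cache.find(idx);
      if (it != class_cache.end()) {
        return it->second;                         // hit: no repeated resolution
      }
      ClassHandle h = ResolveSlow(idx);
      class_cache.emplace(idx, h);
      return h;
    }

    int main() {
      ResolveCached(7);
      ResolveCached(7);
      ResolveCached(9);
      std::printf("slow lookups: %d\n", g_slow_lookups);  // prints 2, not 3
      return 0;
    }
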
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index bb96c211cb..a433d7ef73 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -372,7 +372,7 @@ void InstructionSimplifierVisitor::VisitShift(HBinaryOperation* instruction) {
// (as defined by shift semantics). This ensures other
// optimizations do not need to special case for such situations.
DCHECK_EQ(shift_amount->GetType(), DataType::Type::kInt32);
- instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index */ 1);
+ instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index= */ 1);
RecordSimplification();
return;
}
@@ -749,8 +749,8 @@ static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* allocator, HInstr
return new (allocator) HBelowOrEqual(rhs, lhs);
default:
LOG(FATAL) << "Unknown ConditionType " << cond->GetKind();
+ UNREACHABLE();
}
- return nullptr;
}
static bool CmpHasBoolType(HInstruction* input, HInstruction* cmp) {
@@ -1181,8 +1181,7 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct
HInstruction* input = instruction->GetInput();
DataType::Type input_type = input->GetType();
DataType::Type result_type = instruction->GetResultType();
- if (DataType::IsTypeConversionImplicit(input_type, result_type)) {
- // Remove the implicit conversion; this includes conversion to the same type.
+ if (instruction->IsImplicitConversion()) {
instruction->ReplaceWith(input);
instruction->GetBlock()->RemoveInstruction(instruction);
RecordSimplification();
@@ -1317,7 +1316,7 @@ void InstructionSimplifierVisitor::VisitAdd(HAdd* instruction) {
}
HNeg* neg = left_is_neg ? left->AsNeg() : right->AsNeg();
- if ((left_is_neg ^ right_is_neg) && neg->HasOnlyOneNonEnvironmentUse()) {
+ if (left_is_neg != right_is_neg && neg->HasOnlyOneNonEnvironmentUse()) {
// Replace code looking like
// NEG tmp, b
// ADD dst, a, tmp
@@ -2290,7 +2289,7 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction)
// the invoke, as we would need to look it up in the current dex file, and it
// is unlikely that it exists. The most usual situation for such typed
// arraycopy methods is a direct pointer to the boot image.
- HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_);
+ invoke->SetDispatchInfo(HSharpening::SharpenInvokeStaticOrDirect(method, codegen_));
}
}
}
@@ -2362,17 +2361,17 @@ void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) {
ArenaAllocator* allocator = GetGraph()->GetAllocator();
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength, HBoundsCheck and HArrayGet.
- HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length= */ true);
invoke->GetBlock()->InsertInstructionBefore(length, invoke);
HBoundsCheck* bounds_check = new (allocator) HBoundsCheck(
- index, length, dex_pc, /* is_string_char_at */ true);
+ index, length, dex_pc, /* is_string_char_at= */ true);
invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke);
HArrayGet* array_get = new (allocator) HArrayGet(str,
bounds_check,
DataType::Type::kUint16,
SideEffects::None(), // Strings are immutable.
dex_pc,
- /* is_string_char_at */ true);
+ /* is_string_char_at= */ true);
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get);
bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment());
GetGraph()->SetHasBoundsChecks(true);
@@ -2384,7 +2383,7 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke
// We treat String as an array to allow DCE and BCE to seamlessly work on strings,
// so create the HArrayLength.
HArrayLength* length =
- new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true);
+ new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length= */ true);
HInstruction* replacement;
if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) {
// For String.isEmpty(), create the `HEqual` representing the `length == 0`.
@@ -2535,28 +2534,28 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
SimplifySystemArrayCopy(instruction);
break;
case Intrinsics::kIntegerRotateRight:
- SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt32);
+ SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt32);
break;
case Intrinsics::kLongRotateRight:
- SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt64);
+ SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt64);
break;
case Intrinsics::kIntegerRotateLeft:
- SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt32);
+ SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt32);
break;
case Intrinsics::kLongRotateLeft:
- SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt64);
+ SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt64);
break;
case Intrinsics::kIntegerCompare:
- SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt32);
+ SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt32);
break;
case Intrinsics::kLongCompare:
- SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt64);
+ SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt64);
break;
case Intrinsics::kIntegerSignum:
- SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt32);
+ SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt32);
break;
case Intrinsics::kLongSignum:
- SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt64);
+ SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt64);
break;
case Intrinsics::kFloatIsNaN:
case Intrinsics::kDoubleIsNaN:
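
The VisitAdd hunk above ends mid-comment, but the shape being matched is the usual fold of an addition with one negated operand into a subtraction; the change from `^` to `!=` merely rewrites the test that exactly one operand is a negation over plain bools. A standalone check of the identity (plain C++, not ART code):

    #include <cassert>

    int add_of_neg(int a, int b) { return a + (-b); }  // NEG tmp, b ; ADD dst, a, tmp
    int sub(int a, int b)        { return a - b; }     // SUB dst, a, b

    int main() {
      for (int a = -4; a <= 4; ++a) {
        for (int b = -4; b <= 4; ++b) {
          assert(add_of_neg(a, b) == sub(a, b));
        }
      }
      return 0;
    }
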
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 24fbb6cb4c..01e9cff6d8 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -43,11 +43,11 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge);
bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
}
bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
}
/**
@@ -202,6 +202,11 @@ void InstructionSimplifierArmVisitor::VisitArrayGet(HArrayGet* instruction) {
return;
}
+ // TODO: Support intermediate address for object arrays on arm.
+ if (type == DataType::Type::kReference) {
+ return;
+ }
+
if (type == DataType::Type::kInt64
|| type == DataType::Type::kFloat32
|| type == DataType::Type::kFloat64) {
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index b536cb4dc4..e23decbd71 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -45,11 +45,11 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
HInstruction* bitfield_op,
bool do_merge);
bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false);
}
bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) {
DCHECK(CanMergeIntoShifterOperand(use, bitfield_op));
- return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true);
+ return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true);
}
/**
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index ccdcb3532d..0f30f662cd 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -245,11 +245,11 @@ bool TryExtractArrayAccessAddress(HInstruction* access,
return false;
}
if (kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier &&
access->IsArrayGet() &&
access->GetType() == DataType::Type::kReference) {
- // For object arrays, the read barrier instrumentation requires
+ // For object arrays, the non-Baker read barrier instrumentation requires
// the original array pointer.
- // TODO: This can be relaxed for Baker CC.
return false;
}
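
With this change, TryExtractArrayAccessAddress may now fire for reference-type HArrayGet as long as Baker read barriers are used; only the non-Baker instrumentation still needs the original array pointer. The point of the extraction is that several accesses to the same array can share one base-plus-data-offset computation. A standalone sketch of that address arithmetic (the 16-byte offset is illustrative, not ART's real layout constant):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    constexpr size_t kDataOffset = 16;  // illustrative payload offset inside the array object

    int32_t LoadElement(const uint8_t* array_object, size_t index) {
      // Without the extraction, every access recomputes array + kDataOffset + index * 4.
      // With it, `base` (the intermediate address in the compiler) is computed once
      // and shared by all accesses to the same array.
      const uint8_t* base = array_object + kDataOffset;
      int32_t value;
      std::memcpy(&value, base + index * sizeof(int32_t), sizeof(value));
      return value;
    }

    int main() {
      uint8_t fake_object[16 + 4 * sizeof(int32_t)] = {};
      const int32_t elems[4] = {10, 20, 30, 40};
      std::memcpy(fake_object + kDataOffset, elems, sizeof(elems));
      std::printf("%d\n", LoadElement(fake_object, 2));  // prints 30
      return 0;
    }
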
diff --git a/compiler/optimizing/instruction_simplifier_x86.cc b/compiler/optimizing/instruction_simplifier_x86.cc
new file mode 100644
index 0000000000..2d8f94a85b
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86.cc
@@ -0,0 +1,88 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86.h"
+#include "instruction_simplifier_x86_shared.h"
+#include "code_generator_x86.h"
+
+namespace art {
+
+namespace x86 {
+
+class InstructionSimplifierX86Visitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierX86Visitor(HGraph* graph,
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph),
+ codegen_(down_cast<CodeGeneratorX86*>(codegen)),
+ stats_(stats) {}
+
+ void RecordSimplification() {
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
+ }
+
+ bool HasAVX2() {
+ return codegen_->GetInstructionSetFeatures().HasAVX2();
+ }
+
+ void VisitBasicBlock(HBasicBlock* block) override {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInBlock()) {
+ instruction->Accept(this);
+ }
+ }
+ }
+
+ void VisitAnd(HAnd* instruction) override;
+ void VisitXor(HXor* instruction) override;
+
+ private:
+ CodeGeneratorX86* codegen_;
+ OptimizingCompilerStats* stats_;
+};
+
+
+void InstructionSimplifierX86Visitor::VisitAnd(HAnd* instruction) {
+ if (TryCombineAndNot(instruction)) {
+ RecordSimplification();
+ } else if (instruction->GetResultType() == DataType::Type::kInt32) {
+ if (TryGenerateResetLeastSetBit(instruction)) {
+ RecordSimplification();
+ }
+ }
+}
+
+void InstructionSimplifierX86Visitor::VisitXor(HXor* instruction) {
+ if (instruction->GetResultType() == DataType::Type::kInt32) {
+ if (TryGenerateMaskUptoLeastSetBit(instruction)) {
+ RecordSimplification();
+ }
+ }
+}
+
+bool InstructionSimplifierX86::Run() {
+ InstructionSimplifierX86Visitor visitor(graph_, codegen_, stats_);
+ if (visitor.HasAVX2()) {
+ visitor.VisitReversePostOrder();
+ return true;
+ }
+ return false;
+}
+
+} // namespace x86
+} // namespace art
+
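
For a concrete picture of what this pass matches: TryCombineAndNot (defined in instruction_simplifier_x86_shared.cc further down) fuses an HNot feeding an HAnd into a single HX86AndNot node, the source-level `x & ~y` shape; whether the pass runs at all is gated by HasAVX2() in Run() above. A standalone check of the value the fused node must produce (plain C++, not compiler code):

    #include <cassert>
    #include <cstdint>

    // AndNot(dst, x, y) computes the same value as the two-node shape
    // Not tmp, y ; And dst, x, tmp.
    uint32_t and_not(uint32_t x, uint32_t y) { return x & ~y; }

    int main() {
      assert(and_not(0xF0F0u, 0x0FF0u) == 0xF000u);
      assert(and_not(0xCu /*1100*/, 0xAu /*1010*/) == 0x4u /*0100*/);
      return 0;
    }
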
diff --git a/compiler/optimizing/instruction_simplifier_x86.h b/compiler/optimizing/instruction_simplifier_x86.h
new file mode 100644
index 0000000000..6f10006db2
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86.h
@@ -0,0 +1,44 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+class CodeGenerator;
+namespace x86 {
+
+class InstructionSimplifierX86 : public HOptimization {
+ public:
+ InstructionSimplifierX86(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kInstructionSimplifierX86PassName, stats),
+ codegen_(codegen) {}
+
+ static constexpr const char* kInstructionSimplifierX86PassName = "instruction_simplifier_x86";
+
+ bool Run() override;
+
+ private:
+ CodeGenerator* codegen_;
+};
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
diff --git a/compiler/optimizing/instruction_simplifier_x86_64.cc b/compiler/optimizing/instruction_simplifier_x86_64.cc
new file mode 100644
index 0000000000..56c6b414d7
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_64.cc
@@ -0,0 +1,82 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86_64.h"
+#include "instruction_simplifier_x86_shared.h"
+#include "code_generator_x86_64.h"
+
+namespace art {
+
+namespace x86_64 {
+
+class InstructionSimplifierX86_64Visitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierX86_64Visitor(HGraph* graph,
+ CodeGenerator* codegen,
+ OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph),
+ codegen_(down_cast<CodeGeneratorX86_64*>(codegen)),
+ stats_(stats) {}
+
+ void RecordSimplification() {
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
+ }
+
+ bool HasAVX2() {
+ return codegen_->GetInstructionSetFeatures().HasAVX2();
+ }
+
+ void VisitBasicBlock(HBasicBlock* block) override {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInBlock()) {
+ instruction->Accept(this);
+ }
+ }
+ }
+
+ void VisitAnd(HAnd* instruction) override;
+ void VisitXor(HXor* instruction) override;
+
+ private:
+ CodeGeneratorX86_64* codegen_;
+ OptimizingCompilerStats* stats_;
+};
+
+void InstructionSimplifierX86_64Visitor::VisitAnd(HAnd* instruction) {
+ if (TryCombineAndNot(instruction)) {
+ RecordSimplification();
+ } else if (TryGenerateResetLeastSetBit(instruction)) {
+ RecordSimplification();
+ }
+}
+
+
+void InstructionSimplifierX86_64Visitor::VisitXor(HXor* instruction) {
+ if (TryGenerateMaskUptoLeastSetBit(instruction)) {
+ RecordSimplification();
+ }
+}
+
+bool InstructionSimplifierX86_64::Run() {
+ InstructionSimplifierX86_64Visitor visitor(graph_, codegen_, stats_);
+ if (visitor.HasAVX2()) {
+ visitor.VisitReversePostOrder();
+ return true;
+ }
+ return false;
+}
+} // namespace x86_64
+} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_x86_64.h b/compiler/optimizing/instruction_simplifier_x86_64.h
new file mode 100644
index 0000000000..6cae24d11a
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_64.h
@@ -0,0 +1,48 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_64_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_64_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+
+class CodeGenerator;
+
+namespace x86_64 {
+
+class InstructionSimplifierX86_64 : public HOptimization {
+ public:
+ InstructionSimplifierX86_64(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kInstructionSimplifierX86_64PassName, stats),
+ codegen_(codegen) {}
+
+ static constexpr const char* kInstructionSimplifierX86_64PassName =
+ "instruction_simplifier_x86_64";
+
+ bool Run() override;
+
+ private:
+ CodeGenerator* codegen_;
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_64_H_
+
+
diff --git a/compiler/optimizing/instruction_simplifier_x86_shared.cc b/compiler/optimizing/instruction_simplifier_x86_shared.cc
new file mode 100644
index 0000000000..2805abb2bb
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_shared.cc
@@ -0,0 +1,137 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86_shared.h"
+#include "nodes_x86.h"
+
+namespace art {
+
+bool TryCombineAndNot(HAnd* instruction) {
+ DataType::Type type = instruction->GetType();
+ if (!DataType::IsIntOrLongType(type)) {
+ return false;
+ }
+ // Replace code looking like
+ // Not tmp, y
+ // And dst, x, tmp
+ // with
+ // AndNot dst, x, y
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ // Perform the simplification only when exactly one of left and right
+ // is a Not. When both are Not, the instruction should be simplified with
+ // De Morgan's laws instead.
+ if (left->IsNot() ^ right->IsNot()) {
+ bool left_is_not = left->IsNot();
+ HInstruction* other_ins = (left_is_not ? right : left);
+ HNot* not_ins = (left_is_not ? left : right)->AsNot();
+ // Only do the simplification if instruction has only one use
+ // and thus can be safely removed.
+ if (not_ins->HasOnlyOneNonEnvironmentUse()) {
+ ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
+ HX86AndNot* and_not = new (arena) HX86AndNot(type,
+ not_ins->GetInput(),
+ other_ins,
+ instruction->GetDexPc());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, and_not);
+ DCHECK(!not_ins->HasUses());
+ not_ins->GetBlock()->RemoveInstruction(not_ins);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool TryGenerateResetLeastSetBit(HAnd* instruction) {
+ DataType::Type type = instruction->GetType();
+ if (!DataType::IsIntOrLongType(type)) {
+ return false;
+ }
+ // Replace code looking like
+ // Add tmp, x, -1 or Sub tmp, x, 1
+ // And dest, x, tmp
+ // with
+ // MaskOrResetLeastSetBit dest, x
+ HInstruction* candidate = nullptr;
+ HInstruction* other = nullptr;
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ if (AreLeastSetBitInputs(left, right)) {
+ candidate = left;
+ other = right;
+ } else if (AreLeastSetBitInputs(right, left)) {
+ candidate = right;
+ other = left;
+ }
+ if (candidate != nullptr && candidate->HasOnlyOneNonEnvironmentUse()) {
+ ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
+ HX86MaskOrResetLeastSetBit* lsb = new (arena) HX86MaskOrResetLeastSetBit(
+ type, HInstruction::kAnd, other, instruction->GetDexPc());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, lsb);
+ DCHECK(!candidate->HasUses());
+ candidate->GetBlock()->RemoveInstruction(candidate);
+ return true;
+ }
+ return false;
+}
+
+bool TryGenerateMaskUptoLeastSetBit(HXor* instruction) {
+ DataType::Type type = instruction->GetType();
+ if (!DataType::IsIntOrLongType(type)) {
+ return false;
+ }
+ // Replace code looking like
+ // Add tmp, x, -1 or Sub tmp, x, 1
+ // Xor dest, x, tmp
+ // with
+ // MaskOrResetLeastSetBit dest, x
+ HInstruction* left = instruction->GetLeft();
+ HInstruction* right = instruction->GetRight();
+ HInstruction* other = nullptr;
+ HInstruction* candidate = nullptr;
+ if (AreLeastSetBitInputs(left, right)) {
+ candidate = left;
+ other = right;
+ } else if (AreLeastSetBitInputs(right, left)) {
+ candidate = right;
+ other = left;
+ }
+ if (candidate != nullptr && candidate->HasOnlyOneNonEnvironmentUse()) {
+ ArenaAllocator* arena = instruction->GetBlock()->GetGraph()->GetAllocator();
+ HX86MaskOrResetLeastSetBit* lsb = new (arena) HX86MaskOrResetLeastSetBit(
+ type, HInstruction::kXor, other, instruction->GetDexPc());
+ instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, lsb);
+ DCHECK(!candidate->HasUses());
+ candidate->GetBlock()->RemoveInstruction(candidate);
+ return true;
+ }
+ return false;
+}
+
+bool AreLeastSetBitInputs(HInstruction* to_test, HInstruction* other) {
+ if (to_test->IsAdd()) {
+ HAdd* add = to_test->AsAdd();
+ HConstant* cst = add->GetConstantRight();
+ return cst != nullptr && cst->IsMinusOne() && other == add->GetLeastConstantLeft();
+ }
+ if (to_test->IsSub()) {
+ HSub* sub = to_test->AsSub();
+ HConstant* cst = sub->GetConstantRight();
+ return cst != nullptr && cst->IsOne() && other == sub->GetLeastConstantLeft();
+ }
+ return false;
+}
+
+} // namespace art
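
The two least-set-bit helpers above rewrite to the same HX86MaskOrResetLeastSetBit node, distinguished only by the kAnd/kXor opcode. The underlying bit identities, checked standalone (the constants are just a worked example, not taken from the patch):

    #include <cassert>
    #include <cstdint>

    uint32_t reset_least_set_bit(uint32_t x)      { return x & (x - 1); }  // And(x, x - 1)
    uint32_t mask_up_to_least_set_bit(uint32_t x) { return x ^ (x - 1); }  // Xor(x, x - 1)

    int main() {
      // x = 0b10100, so x - 1 = 0b10011.
      assert(reset_least_set_bit(0x14u) == 0x10u);         // lowest set bit cleared
      assert(mask_up_to_least_set_bit(0x14u) == 0x7u);     // ones through the lowest set bit
      // x == 0 wraps to 0xFFFFFFFF for x - 1; both results stay well defined.
      assert(reset_least_set_bit(0u) == 0u);
      assert(mask_up_to_least_set_bit(0u) == 0xFFFFFFFFu);
      return 0;
    }
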
diff --git a/compiler/optimizing/instruction_simplifier_x86_shared.h b/compiler/optimizing/instruction_simplifier_x86_shared.h
new file mode 100644
index 0000000000..7f94d7ea4c
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86_shared.h
@@ -0,0 +1,29 @@
+/* Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_SHARED_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_SHARED_H_
+
+#include "nodes.h"
+
+namespace art {
+bool TryCombineAndNot(HAnd* instruction);
+bool TryGenerateResetLeastSetBit(HAnd* instruction);
+bool TryGenerateMaskUptoLeastSetBit(HXor* instruction);
+bool AreLeastSetBitInputs(HInstruction* to_test, HInstruction* other);
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_SHARED_H_
+
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
index 3c20ad698b..c345624a7a 100644
--- a/compiler/optimizing/intrinsic_objects.cc
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -21,6 +21,7 @@
#include "class_root.h"
#include "handle.h"
#include "obj_ptr-inl.h"
+#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
namespace art {
@@ -29,7 +30,7 @@ static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* se
ClassLinker* class_linker)
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
- self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_loader= */ nullptr);
if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
return nullptr;
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 1407ea92cb..2de0f0c737 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -33,179 +33,6 @@
namespace art {
-// Check that intrinsic enum values fit within space set aside in ArtMethod modifier flags.
-#define CHECK_INTRINSICS_ENUM_VALUES(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- static_assert( \
- static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
- "Instrinsics enumeration space overflow.");
-#include "intrinsics_list.h"
- INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
-#undef INTRINSICS_LIST
-#undef CHECK_INTRINSICS_ENUM_VALUES
-
-// Function that returns whether an intrinsic is static/direct or virtual.
-static inline InvokeType GetIntrinsicInvokeType(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kInterface; // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return IsStatic;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kInterface;
-}
-
-// Function that returns whether an intrinsic needs an environment or not.
-static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCache(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kNeedsEnvironmentOrCache; // Non-sensical for intrinsic.
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return NeedsEnvironmentOrCache;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kNeedsEnvironmentOrCache;
-}
-
-// Function that returns whether an intrinsic has side effects.
-static inline IntrinsicSideEffects GetSideEffects(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kAllSideEffects;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return SideEffects;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kAllSideEffects;
-}
-
-// Function that returns whether an intrinsic can throw exceptions.
-static inline IntrinsicExceptions GetExceptions(Intrinsics i) {
- switch (i) {
- case Intrinsics::kNone:
- return kCanThrow;
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
- case Intrinsics::k ## Name: \
- return Exceptions;
-#include "intrinsics_list.h"
- INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
- }
- return kCanThrow;
-}
-
-static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Whenever the intrinsic is marked as static, report an error if we find an InvokeVirtual.
- //
- // Whenever the intrinsic is marked as direct and we find an InvokeVirtual, a devirtualization
- // failure occured. We might be in a situation where we have inlined a method that calls an
- // intrinsic, but that method is in a different dex file on which we do not have a
- // verified_method that would have helped the compiler driver sharpen the call. In that case,
- // make sure that the intrinsic is actually for some final method (or in a final class), as
- // otherwise the intrinsics setup is broken.
- //
- // For the last direction, we have intrinsics for virtual functions that will perform a check
- // inline. If the precise type is known, however, the instruction will be sharpened to an
- // InvokeStaticOrDirect.
- InvokeType intrinsic_type = GetIntrinsicInvokeType(intrinsic);
- InvokeType invoke_type = invoke->GetInvokeType();
-
- switch (intrinsic_type) {
- case kStatic:
- return (invoke_type == kStatic);
-
- case kDirect:
- if (invoke_type == kDirect) {
- return true;
- }
- if (invoke_type == kVirtual) {
- ArtMethod* art_method = invoke->GetResolvedMethod();
- return (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal());
- }
- return false;
-
- case kVirtual:
- // Call might be devirtualized.
- return (invoke_type == kVirtual || invoke_type == kDirect || invoke_type == kInterface);
-
- case kSuper:
- case kInterface:
- case kPolymorphic:
- case kCustom:
- return false;
- }
- LOG(FATAL) << "Unknown intrinsic invoke type: " << intrinsic_type;
- UNREACHABLE();
-}
-
-bool IntrinsicsRecognizer::Recognize(HInvoke* invoke,
- ArtMethod* art_method,
- /*out*/ bool* wrong_invoke_type) {
- if (art_method == nullptr) {
- art_method = invoke->GetResolvedMethod();
- }
- *wrong_invoke_type = false;
- if (art_method == nullptr || !art_method->IsIntrinsic()) {
- return false;
- }
-
- // TODO: b/65872996 The intent is that polymorphic signature methods should
- // be compiler intrinsics. At present, they are only interpreter intrinsics.
- if (art_method->IsPolymorphicSignature()) {
- return false;
- }
-
- Intrinsics intrinsic = static_cast<Intrinsics>(art_method->GetIntrinsic());
- if (CheckInvokeType(intrinsic, invoke) == false) {
- *wrong_invoke_type = true;
- return false;
- }
-
- invoke->SetIntrinsic(intrinsic,
- NeedsEnvironmentOrCache(intrinsic),
- GetSideEffects(intrinsic),
- GetExceptions(intrinsic));
- return true;
-}
-
-bool IntrinsicsRecognizer::Run() {
- bool didRecognize = false;
- ScopedObjectAccess soa(Thread::Current());
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
- inst_it.Advance()) {
- HInstruction* inst = inst_it.Current();
- if (inst->IsInvoke()) {
- bool wrong_invoke_type = false;
- if (Recognize(inst->AsInvoke(), /* art_method */ nullptr, &wrong_invoke_type)) {
- didRecognize = true;
- MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
- } else if (wrong_invoke_type) {
- LOG(WARNING)
- << "Found an intrinsic with unexpected invoke type: "
- << inst->AsInvoke()->GetResolvedMethod()->PrettyMethod() << " "
- << inst->DebugName();
- }
- }
- }
- }
- return didRecognize;
-}
-
std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
switch (intrinsic) {
case Intrinsics::kNone:
@@ -250,7 +77,7 @@ static ObjPtr<mirror::Class> LookupInitializedClass(Thread* self,
const char* descriptor)
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::Class> klass =
- class_linker->LookupClass(self, descriptor, /* class_loader */ nullptr);
+ class_linker->LookupClass(self, descriptor, /* class_loader= */ nullptr);
DCHECK(klass != nullptr);
DCHECK(klass->IsInitialized());
return klass;
@@ -340,14 +167,14 @@ void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
ObjPtr<mirror::Class> cache_class = class_linker->LookupClass(
- self, kIntegerCacheDescriptor, /* class_loader */ nullptr);
+ self, kIntegerCacheDescriptor, /* class_loader= */ nullptr);
DCHECK(cache_class != nullptr);
if (UNLIKELY(!cache_class->IsInitialized())) {
LOG(WARNING) << "Image class " << cache_class->PrettyDescriptor() << " is uninitialized.";
return;
}
ObjPtr<mirror::Class> integer_class =
- class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader */ nullptr);
+ class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader= */ nullptr);
DCHECK(integer_class != nullptr);
if (UNLIKELY(!integer_class->IsInitialized())) {
LOG(WARNING) << "Image class " << integer_class->PrettyDescriptor() << " is uninitialized.";
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 7594d4a50b..ab68cce304 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -24,7 +24,6 @@
namespace art {
-class CompilerDriver;
class DexFile;
// Positive floating-point infinities.
@@ -34,28 +33,6 @@ static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000)
static constexpr uint32_t kNanFloat = 0x7fc00000U;
static constexpr uint64_t kNanDouble = 0x7ff8000000000000;
-// Recognize intrinsics from HInvoke nodes.
-class IntrinsicsRecognizer : public HOptimization {
- public:
- IntrinsicsRecognizer(HGraph* graph,
- OptimizingCompilerStats* stats,
- const char* name = kIntrinsicsRecognizerPassName)
- : HOptimization(graph, name, stats) {}
-
- bool Run() override;
-
- // Static helper that recognizes intrinsic call. Returns true on success.
- // If it fails due to invoke type mismatch, wrong_invoke_type is set.
- // Useful to recognize intrinsics on individual calls outside this full pass.
- static bool Recognize(HInvoke* invoke, ArtMethod* method, /*out*/ bool* wrong_invoke_type)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition";
-
- private:
- DISALLOW_COPY_AND_ASSIGN(IntrinsicsRecognizer);
-};
-
class IntrinsicVisitor : public ValueObject {
public:
virtual ~IntrinsicVisitor() {}
@@ -264,11 +241,15 @@ void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNU
// Defines a list of unreached intrinsics: that is, method calls that are recognized as
// an intrinsic, and then always converted into HIR instructions before they reach any
-// architecture-specific intrinsics code generator.
+// architecture-specific intrinsics code generator. This only applies to non-baseline
+// compilation.
#define UNREACHABLE_INTRINSIC(Arch, Name) \
void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
- LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
- << " should have been converted to HIR"; \
+ if (Runtime::Current()->IsAotCompiler() && \
+ !codegen_->GetCompilerOptions().IsBaseline()) { \
+ LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
+ << " should have been converted to HIR"; \
+ } \
} \
void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) { \
LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index fcd278837f..ec5d17a443 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -272,10 +272,10 @@ void IntrinsicLocationsBuilderARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke
}
void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
void IntrinsicCodeGeneratorARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -286,10 +286,10 @@ void IntrinsicLocationsBuilderARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
}
void IntrinsicCodeGeneratorARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -618,7 +618,7 @@ void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) {
- GenMathRound(invoke, /* is_double */ true, GetVIXLAssembler());
+ GenMathRound(invoke, /* is_double= */ true, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
@@ -626,7 +626,7 @@ void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) {
- GenMathRound(invoke, /* is_double */ false, GetVIXLAssembler());
+ GenMathRound(invoke, /* is_double= */ false, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) {
@@ -752,13 +752,13 @@ static void GenUnsafeGet(HInvoke* invoke,
trg_loc,
base,
MemOperand(temp.X()),
- /* needs_null_check */ false,
+ /* needs_null_check= */ false,
is_volatile);
} else {
// Other cases.
MemOperand mem_op(base.X(), offset);
if (is_volatile) {
- codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check */ true);
+ codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check= */ true);
} else {
codegen->Load(type, trg, mem_op);
}
@@ -813,22 +813,22 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObjectVolatile(HInvoke* invok
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -896,7 +896,7 @@ static void GenUnsafePut(HInvoke* invoke,
}
if (is_volatile || is_ordered) {
- codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check */ false);
+ codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check= */ false);
} else {
codegen->Store(type, source, mem_op);
}
@@ -911,64 +911,64 @@ static void GenUnsafePut(HInvoke* invoke,
void IntrinsicCodeGeneratorARM64::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke,
DataType::Type::kInt64,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1646,7 +1646,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1662,7 +1662,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -2464,8 +2464,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
src.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// Bail out if the source is not a non primitive array.
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
@@ -2473,8 +2473,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
__ Cbz(temp1, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
// by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2490,8 +2490,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
dest.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
@@ -2507,8 +2507,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
__ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
// by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2526,8 +2526,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
src.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// Note: if heap poisoning is on, we are comparing two unpoisoned references here.
__ Cmp(temp1, temp2);
@@ -2540,8 +2540,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// /* HeapReference<Class> */ temp1 = temp1->super_class_
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
@@ -2624,16 +2624,16 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
src.W(),
class_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
// /* HeapReference<Class> */ temp2 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
temp2_loc,
temp1,
component_offset,
temp3_loc,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
+ /* needs_null_check= */ false,
+ /* use_load_acquire= */ false);
__ Cbz(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
// by the the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2787,7 +2787,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false);
+ codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -2820,7 +2820,7 @@ void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
@@ -2828,7 +2828,7 @@ void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler());
}
void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2924,6 +2924,251 @@ void IntrinsicLocationsBuilderARM64::VisitReachabilityFence(HInvoke* invoke) {
void IntrinsicCodeGeneratorARM64::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
+void IntrinsicLocationsBuilderARM64::VisitCRC32Update(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+ return;
+ }
+
+ LocationSummary* locations = new (allocator_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+// Lower the invoke of CRC32.update(int crc, int b).
+void IntrinsicCodeGeneratorARM64::VisitCRC32Update(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+ MacroAssembler* masm = GetVIXLAssembler();
+
+ Register crc = InputRegisterAt(invoke, 0);
+ Register val = InputRegisterAt(invoke, 1);
+ Register out = OutputRegister(invoke);
+
+ // The general algorithm of the CRC32 calculation is:
+ // crc = ~crc
+ // result = crc32_for_byte(crc, b)
+ // crc = ~result
+ // It is directly lowered to three instructions.
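+ //
+ // For reference only (this is not the emitted code): crc32_for_byte above is the
+ // standard reflected CRC-32 byte step used by java.util.zip.CRC32, roughly
+ //   crc ^= b;
+ //   for (int i = 0; i < 8; ++i) crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0u);
+ // The CRC32B instruction performs that whole step in hardware.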
+
+ UseScratchRegisterScope temps(masm);
+ Register tmp = temps.AcquireSameSizeAs(out);
+
+ __ Mvn(tmp, crc);
+ __ Crc32b(tmp, tmp, val);
+ __ Mvn(out, tmp);
+}
+
+// Generate code using CRC32 instructions which calculates
+// the CRC32 value of a byte buffer.
+//
+// Parameters:
+// masm - VIXL macro assembler
+// crc - a register holding an initial CRC value
+// ptr - a register holding a memory address of bytes
+//   length - a register holding the number of bytes to process
+//   out - a register to hold the result of the calculation
+static void GenerateCodeForCalculationCRC32ValueOfBytes(MacroAssembler* masm,
+ const Register& crc,
+ const Register& ptr,
+ const Register& length,
+ const Register& out) {
+ // The algorithm of CRC32 of bytes is:
+ // crc = ~crc
+ // process the first few bytes to make the array 8-byte aligned
+ // while array has 8 bytes do:
+ // crc = crc32_of_8bytes(crc, 8_bytes(array))
+ // if array has 4 bytes:
+ // crc = crc32_of_4bytes(crc, 4_bytes(array))
+ // if array has 2 bytes:
+ // crc = crc32_of_2bytes(crc, 2_bytes(array))
+ // if array has a byte:
+ // crc = crc32_of_byte(crc, 1_byte(array))
+ // crc = ~crc
+
+ vixl::aarch64::Label loop, done;
+ vixl::aarch64::Label process_4bytes, process_2bytes, process_1byte;
+ vixl::aarch64::Label aligned2, aligned4, aligned8;
+
+ // Use VIXL scratch registers as the VIXL macro assembler won't use them in
+ // instructions below.
+ UseScratchRegisterScope temps(masm);
+ Register len = temps.AcquireW();
+ Register array_elem = temps.AcquireW();
+
+ __ Mvn(out, crc);
+ __ Mov(len, length);
+
+ __ Tbz(ptr, 0, &aligned2);
+ __ Subs(len, len, 1);
+ __ B(&done, lo);
+ __ Ldrb(array_elem, MemOperand(ptr, 1, PostIndex));
+ __ Crc32b(out, out, array_elem);
+
+ __ Bind(&aligned2);
+ __ Tbz(ptr, 1, &aligned4);
+ __ Subs(len, len, 2);
+ __ B(&process_1byte, lo);
+ __ Ldrh(array_elem, MemOperand(ptr, 2, PostIndex));
+ __ Crc32h(out, out, array_elem);
+
+ __ Bind(&aligned4);
+ __ Tbz(ptr, 2, &aligned8);
+ __ Subs(len, len, 4);
+ __ B(&process_2bytes, lo);
+ __ Ldr(array_elem, MemOperand(ptr, 4, PostIndex));
+ __ Crc32w(out, out, array_elem);
+
+ __ Bind(&aligned8);
+ __ Subs(len, len, 8);
+ // If len < 8, process the remaining data by 4 bytes, 2 bytes and a byte.
+ __ B(&process_4bytes, lo);
+
+ // The main loop processing data by 8 bytes.
+ __ Bind(&loop);
+ __ Ldr(array_elem.X(), MemOperand(ptr, 8, PostIndex));
+ __ Subs(len, len, 8);
+ __ Crc32x(out, out, array_elem.X());
+ // if len >= 8, process the next 8 bytes.
+ __ B(&loop, hs);
+
+ // Process the data which is less than 8 bytes.
+ // The code generated below works with values of len
+ // which come in the range [-8, 0].
+ // The first three bits are used to detect whether 4 bytes or 2 bytes or
+ // a byte can be processed.
+ // The checking order is from bit 2 to bit 0:
+ // bit 2 is set: at least 4 bytes available
+ // bit 1 is set: at least 2 bytes available
+ // bit 0 is set: at least a byte available
+ __ Bind(&process_4bytes);
+ // Go to process_2bytes if fewer than four bytes are available
+ __ Tbz(len, 2, &process_2bytes);
+ __ Ldr(array_elem, MemOperand(ptr, 4, PostIndex));
+ __ Crc32w(out, out, array_elem);
+
+ __ Bind(&process_2bytes);
+ // Go to process_1byte if fewer than two bytes are available
+ __ Tbz(len, 1, &process_1byte);
+ __ Ldrh(array_elem, MemOperand(ptr, 2, PostIndex));
+ __ Crc32h(out, out, array_elem);
+
+ __ Bind(&process_1byte);
+ // Go to done if no bytes are available
+ __ Tbz(len, 0, &done);
+ __ Ldrb(array_elem, MemOperand(ptr));
+ __ Crc32b(out, out, array_elem);
+
+ __ Bind(&done);
+ __ Mvn(out, out);
+}
+
+// The array size threshold above which the library-provided implementation of
+// CRC32.updateBytes is used instead of the intrinsic.
+static constexpr int32_t kCRC32UpdateBytesThreshold = 64 * 1024;
+
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+ return;
+ }
+
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RegisterOrConstant(invoke->InputAt(2)));
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateBytes(int crc, byte[] b, int off, int len)
+//
+// Note: The intrinsic is not used if len exceeds a threshold.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateBytes(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+ MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ SlowPathCodeARM64* slow_path =
+ new (codegen_->GetScopedAllocator()) IntrinsicSlowPathARM64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ Register length = WRegisterFrom(locations->InAt(3));
+ __ Cmp(length, kCRC32UpdateBytesThreshold);
+ __ B(slow_path->GetEntryLabel(), hi);
+
+ const uint32_t array_data_offset =
+ mirror::Array::DataOffset(Primitive::kPrimByte).Uint32Value();
+ Register ptr = XRegisterFrom(locations->GetTemp(0));
+ Register array = XRegisterFrom(locations->InAt(1));
+ Location offset = locations->InAt(2);
+ if (offset.IsConstant()) {
+ int32_t offset_value = offset.GetConstant()->AsIntConstant()->GetValue();
+ __ Add(ptr, array, array_data_offset + offset_value);
+ } else {
+ __ Add(ptr, array, array_data_offset);
+ __ Add(ptr, ptr, XRegisterFrom(offset));
+ }
+
+ Register crc = WRegisterFrom(locations->InAt(0));
+ Register out = WRegisterFrom(locations->Out());
+
+ GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+ if (!codegen_->GetInstructionSetFeatures().HasCRC()) {
+ return;
+ }
+
+ LocationSummary* locations =
+ new (allocator_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+// Lower the invoke of CRC32.updateByteBuffer(int crc, long addr, int off, int len)
+//
+// There is no need to generate code checking whether addr is 0.
+// updateByteBuffer is a private method of java.util.zip.CRC32, so it is never
+// called from outside the CRC32 class, and the address of a DirectBuffer is
+// always passed to it. An implementation of an empty DirectBuffer may use a
+// zero address, but in that case its length must also be zero, and the
+// generated code handles a zero length correctly.
+void IntrinsicCodeGeneratorARM64::VisitCRC32UpdateByteBuffer(HInvoke* invoke) {
+ DCHECK(codegen_->GetInstructionSetFeatures().HasCRC());
+
+ MacroAssembler* masm = GetVIXLAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ Register addr = XRegisterFrom(locations->InAt(1));
+ Register ptr = XRegisterFrom(locations->GetTemp(0));
+ __ Add(ptr, addr, XRegisterFrom(locations->InAt(2)));
+
+ Register crc = WRegisterFrom(locations->InAt(0));
+ Register length = WRegisterFrom(locations->InAt(3));
+ Register out = WRegisterFrom(locations->Out());
+ GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
+}
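
To make the difference between the two intrinsics explicit, here is a small C++ sketch (not part of the patch) of the data-pointer computation each one performs before calling the shared helper above. The kByteArrayDataOffset parameter is a hypothetical stand-in for the array data offset the real code obtains from mirror::Array::DataOffset(Primitive::kPrimByte).

  #include <cstdint>

  // CRC32.updateBytes: the data lives inside a managed byte[] object, so the
  // pointer is the array base plus the array-data offset plus off.
  uint8_t* UpdateBytesDataPointer(uint8_t* array_object,
                                  int32_t off,
                                  uint32_t kByteArrayDataOffset) {
    return array_object + kByteArrayDataOffset + off;
  }

  // CRC32.updateByteBuffer: addr is already a raw native address of the
  // DirectBuffer storage, so only the offset is added.
  uint8_t* UpdateByteBufferDataPointer(uint64_t addr, int32_t off) {
    return reinterpret_cast<uint8_t*>(addr) + off;
  }
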
+
UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index f0a418454d..f0aa92e981 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -229,7 +229,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
assembler->MaybePoisonHeapReference(tmp);
__ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex));
__ Cmp(src_curr_addr, src_stop_addr);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
__ B(GetExitLabel());
}
@@ -298,10 +298,10 @@ void IntrinsicLocationsBuilderARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invo
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -312,10 +312,10 @@ void IntrinsicLocationsBuilderARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke)
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -355,7 +355,7 @@ static void GenNumberOfLeadingZeros(HInvoke* invoke,
vixl32::Label end;
vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ Clz(out, in_reg_hi);
- __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* far_target */ false);
+ __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* is_far_target= */ false);
__ Clz(out, in_reg_lo);
__ Add(out, out, 32);
if (end.IsReferenced()) {
@@ -398,7 +398,7 @@ static void GenNumberOfTrailingZeros(HInvoke* invoke,
vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end);
__ Rbit(out, in_reg_lo);
__ Clz(out, out);
- __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* far_target */ false);
+ __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* is_far_target= */ false);
__ Rbit(out, in_reg_hi);
__ Clz(out, out);
__ Add(out, out, 32);
@@ -446,7 +446,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMathRint(HInvoke* invoke) {
void IntrinsicCodeGeneratorARMVIXL::VisitMathRint(HInvoke* invoke) {
DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
ArmVIXLAssembler* assembler = GetAssembler();
- __ Vrintn(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+ __ Vrintn(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
@@ -476,12 +476,12 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathRoundFloat(HInvoke* invoke) {
// For positive, zero or NaN inputs, rounding is done.
__ Cmp(out_reg, 0);
- __ B(ge, final_label, /* far_target */ false);
+ __ B(ge, final_label, /* is_far_target= */ false);
// Handle input < 0 cases.
// If input is negative but not a tie, previous result (round to nearest) is valid.
// If input is a negative tie, change rounding direction to positive infinity, out_reg += 1.
- __ Vrinta(F32, F32, temp1, in_reg);
+ __ Vrinta(F32, temp1, in_reg);
__ Vmov(temp2, 0.5);
__ Vsub(F32, temp1, in_reg, temp1);
__ Vcmp(F32, temp1, temp2);
@@ -642,7 +642,7 @@ static void GenUnsafeGet(HInvoke* invoke,
__ Add(RegisterFrom(temp), base, Operand(offset));
MemOperand src(RegisterFrom(temp), 0);
codegen->GenerateFieldLoadWithBakerReadBarrier(
- invoke, trg_loc, base, src, /* needs_null_check */ false);
+ invoke, trg_loc, base, src, /* needs_null_check= */ false);
if (is_volatile) {
__ Dmb(vixl32::ISH);
}
@@ -733,22 +733,22 @@ void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* inv
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
@@ -778,39 +778,39 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator,
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kReference, /* is_volatile= */ true, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke);
}
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoid(
- allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke);
+ allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ true, invoke);
}
static void GenUnsafePut(LocationSummary* locations,
@@ -844,7 +844,7 @@ static void GenUnsafePut(LocationSummary* locations,
__ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg));
__ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg));
__ Cmp(temp_lo, 0);
- __ B(ne, &loop_head, /* far_target */ false);
+ __ B(ne, &loop_head, /* is_far_target= */ false);
} else {
__ Strd(value_lo, value_hi, MemOperand(base, offset));
}
@@ -875,64 +875,64 @@ static void GenUnsafePut(LocationSummary* locations,
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1026,7 +1026,7 @@ class BakerReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL {
__ Strex(tmp, value, MemOperand(tmp_ptr));
assembler->MaybeUnpoisonHeapReference(value);
__ Cmp(tmp, 0);
- __ B(ne, &loop_head, /* far_target */ false);
+ __ B(ne, &loop_head, /* is_far_target= */ false);
__ B(GetExitLabel());
}
};
@@ -1092,7 +1092,8 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c
assembler->MaybeUnpoisonHeapReference(tmp);
}
__ Subs(tmp, tmp, expected);
- __ B(ne, failure, (failure == loop_exit) ? kNear : kBranchWithoutHint);
+ static_cast<vixl32::MacroAssembler*>(assembler->GetVIXLAssembler())->
+ B(ne, failure, /* hint= */ (failure == loop_exit) ? kNear : kBranchWithoutHint);
if (type == DataType::Type::kReference) {
assembler->MaybePoisonHeapReference(value);
}
@@ -1101,7 +1102,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c
assembler->MaybeUnpoisonHeapReference(value);
}
__ Cmp(tmp, 0);
- __ B(ne, &loop_head, /* far_target */ false);
+ __ B(ne, &loop_head, /* is_far_target= */ false);
__ Bind(loop_exit);
@@ -1112,7 +1113,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c
__ Lsr(out, out, WhichPowerOf2(out.GetSizeInBits()));
if (type == DataType::Type::kReference) {
- codegen->MaybeGenerateMarkingRegisterCheck(/* code */ 128);
+ codegen->MaybeGenerateMarkingRegisterCheck(/* code= */ 128);
}
}
@@ -1307,23 +1308,23 @@ static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler,
__ Ldr(temp_reg, MemOperand(str, temp1));
__ Ldr(temp2, MemOperand(arg, temp1));
__ Cmp(temp_reg, temp2);
- __ B(ne, &find_char_diff, /* far_target */ false);
+ __ B(ne, &find_char_diff, /* is_far_target= */ false);
__ Add(temp1, temp1, char_size * 2);
__ Ldr(temp_reg, MemOperand(str, temp1));
__ Ldr(temp2, MemOperand(arg, temp1));
__ Cmp(temp_reg, temp2);
- __ B(ne, &find_char_diff_2nd_cmp, /* far_target */ false);
+ __ B(ne, &find_char_diff_2nd_cmp, /* is_far_target= */ false);
__ Add(temp1, temp1, char_size * 2);
// With string compression, we have compared 8 bytes, otherwise 4 chars.
__ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4));
- __ B(hi, &loop, /* far_target */ false);
+ __ B(hi, &loop, /* is_far_target= */ false);
__ B(end);
__ Bind(&find_char_diff_2nd_cmp);
if (mirror::kUseStringCompression) {
__ Subs(temp0, temp0, 4); // 4 bytes previously compared.
- __ B(ls, end, /* far_target */ false); // Was the second comparison fully beyond the end?
+ __ B(ls, end, /* is_far_target= */ false); // Was the second comparison fully beyond the end?
} else {
// Without string compression, we can start treating temp0 as signed
// and rely on the signed comparison below.
@@ -1351,7 +1352,7 @@ static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler,
// the remaining string data, so just return length diff (out).
// The comparison is unsigned for string compression, otherwise signed.
__ Cmp(temp0, Operand(temp1, vixl32::LSR, (mirror::kUseStringCompression ? 3 : 4)));
- __ B((mirror::kUseStringCompression ? ls : le), end, /* far_target */ false);
+ __ B((mirror::kUseStringCompression ? ls : le), end, /* is_far_target= */ false);
// Extract the characters and calculate the difference.
if (mirror::kUseStringCompression) {
@@ -1418,9 +1419,9 @@ static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler,
__ Ldrb(temp_reg, MemOperand(temp1, c_char_size, PostIndex));
__ Ldrh(temp3, MemOperand(temp2, char_size, PostIndex));
__ Cmp(temp_reg, temp3);
- __ B(ne, &different_compression_diff, /* far_target */ false);
+ __ B(ne, &different_compression_diff, /* is_far_target= */ false);
__ Subs(temp0, temp0, 2);
- __ B(hi, &different_compression_loop, /* far_target */ false);
+ __ B(hi, &different_compression_loop, /* is_far_target= */ false);
__ B(end);
// Calculate the difference.
@@ -1516,12 +1517,12 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
StringEqualsOptimizations optimizations(invoke);
if (!optimizations.GetArgumentNotNull()) {
// Check if input is null, return false if it is.
- __ CompareAndBranchIfZero(arg, &return_false, /* far_target */ false);
+ __ CompareAndBranchIfZero(arg, &return_false, /* is_far_target= */ false);
}
// Reference equality check, return true if same reference.
__ Cmp(str, arg);
- __ B(eq, &return_true, /* far_target */ false);
+ __ B(eq, &return_true, /* is_far_target= */ false);
if (!optimizations.GetArgumentIsString()) {
// Instanceof check for the argument by comparing class fields.
@@ -1539,7 +1540,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
// Also, because we use the previously loaded class references only in the
// following comparison, we don't need to unpoison them.
__ Cmp(temp, out);
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
}
// Check if one of the inputs is a const string. Do not special-case both strings
@@ -1562,7 +1563,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
// Also compares the compression style; if it differs, return false.
__ Ldr(temp, MemOperand(arg, count_offset));
__ Cmp(temp, Operand(mirror::String::GetFlaggedCount(const_string_length, is_compressed)));
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
} else {
// Load `count` fields of this and argument strings.
__ Ldr(temp, MemOperand(str, count_offset));
@@ -1570,7 +1571,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
// Check if `count` fields are equal, return false if they're not.
// Also compares the compression style; if it differs, return false.
__ Cmp(temp, out);
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
}
// Assertions that must hold in order to compare strings 4 bytes at a time.
@@ -1593,9 +1594,9 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
__ Ldrd(temp, temp1, MemOperand(str, offset));
__ Ldrd(temp2, out, MemOperand(arg, offset));
__ Cmp(temp, temp2);
- __ B(ne, &return_false, /* far_label */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
__ Cmp(temp1, out);
- __ B(ne, &return_false, /* far_label */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
offset += 2u * sizeof(uint32_t);
remaining_bytes -= 2u * sizeof(uint32_t);
}
@@ -1603,13 +1604,13 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
__ Ldr(temp, MemOperand(str, offset));
__ Ldr(out, MemOperand(arg, offset));
__ Cmp(temp, out);
- __ B(ne, &return_false, /* far_label */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
}
} else {
// Return true if both strings are empty. Even with string compression `count == 0` means empty.
static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
"Expecting 0=compressed, 1=uncompressed");
- __ CompareAndBranchIfZero(temp, &return_true, /* far_target */ false);
+ __ CompareAndBranchIfZero(temp, &return_true, /* is_far_target= */ false);
if (mirror::kUseStringCompression) {
// For string compression, calculate the number of bytes to compare (not chars).
@@ -1635,10 +1636,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) {
__ Ldr(temp2, MemOperand(arg, temp1));
__ Add(temp1, temp1, Operand::From(sizeof(uint32_t)));
__ Cmp(out, temp2);
- __ B(ne, &return_false, /* far_target */ false);
+ __ B(ne, &return_false, /* is_far_target= */ false);
// With string compression, we have compared 4 bytes, otherwise 2 chars.
__ Subs(temp, temp, mirror::kUseStringCompression ? 4 : 2);
- __ B(hi, &loop, /* far_target */ false);
+ __ B(hi, &loop, /* is_far_target= */ false);
}
// Return true and exit the function.
@@ -1719,7 +1720,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
@@ -1735,7 +1736,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke)
}
void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1957,7 +1958,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
} else {
if (!optimizations.GetDestinationIsSource()) {
__ Cmp(src, dest);
- __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+ __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
}
__ Cmp(RegisterFrom(dest_pos), src_pos_constant);
__ B(gt, intrinsic_slow_path->GetEntryLabel());
@@ -1965,7 +1966,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
} else {
if (!optimizations.GetDestinationIsSource()) {
__ Cmp(src, dest);
- __ B(ne, &conditions_on_positions_validated, /* far_target */ false);
+ __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false);
}
if (dest_pos.IsConstant()) {
int32_t dest_pos_constant = Int32ConstantFrom(dest_pos);
@@ -2025,11 +2026,11 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
// Bail out if the source is not a non primitive array.
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp1, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
// by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2041,7 +2042,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check= */ false);
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
@@ -2053,7 +2054,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// temporaries such a `temp1`.
// /* HeapReference<Class> */ temp2 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false);
+ invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp2, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
// by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2067,16 +2068,16 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false);
+ invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check= */ false);
// Note: if heap poisoning is on, we are comparing two unpoisoned references here.
__ Cmp(temp1, temp2);
if (optimizations.GetDestinationIsTypedObjectArray()) {
vixl32::Label do_copy;
- __ B(eq, &do_copy, /* far_target */ false);
+ __ B(eq, &do_copy, /* is_far_target= */ false);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
// /* HeapReference<Class> */ temp1 = temp1->super_class_
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
@@ -2133,7 +2134,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
if (optimizations.GetDestinationIsTypedObjectArray()) {
vixl32::Label do_copy;
- __ B(eq, &do_copy, /* far_target */ false);
+ __ B(eq, &do_copy, /* is_far_target= */ false);
if (!did_unpoison) {
assembler->MaybeUnpoisonHeapReference(temp1);
}
@@ -2155,10 +2156,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
// /* HeapReference<Class> */ temp3 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false);
+ invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false);
__ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp3` has been unpoisoned
// by the previous call to GenerateFieldLoadWithBakerReadBarrier.
@@ -2186,7 +2187,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
if (length.IsRegister()) {
// Don't enter the copy loop if the length is null.
- __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target */ false);
+ __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false);
}
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
@@ -2263,7 +2264,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
__ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
}
__ Cmp(temp1, temp3);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
__ Bind(read_barrier_slow_path->GetExitLabel());
} else {
@@ -2285,13 +2286,13 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
__ Str(temp_reg, MemOperand(temp2, element_size, PostIndex));
}
__ Cmp(temp1, temp3);
- __ B(ne, &loop, /* far_target */ false);
+ __ B(ne, &loop, /* is_far_target= */ false);
}
__ Bind(&done);
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -2821,7 +2822,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Subs(num_chr, srcEnd, srcBegin);
// Early out for valid zero-length retrievals.
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// src range to copy.
__ Add(src_ptr, srcObj, value_offset);
@@ -2837,7 +2838,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Ldr(temp, MemOperand(srcObj, count_offset));
__ Tst(temp, 1);
temps.Release(temp);
- __ B(eq, &compressed_string_preloop, /* far_target */ false);
+ __ B(eq, &compressed_string_preloop, /* is_far_target= */ false);
}
__ Add(src_ptr, src_ptr, Operand(srcBegin, vixl32::LSL, 1));
@@ -2847,7 +2848,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
temp = temps.Acquire();
// Save repairing the value of num_chr on the < 4 character path.
__ Subs(temp, num_chr, 4);
- __ B(lt, &remainder, /* far_target */ false);
+ __ B(lt, &remainder, /* is_far_target= */ false);
// Keep the result of the earlier subs, we are going to fetch at least 4 characters.
__ Mov(num_chr, temp);
@@ -2862,10 +2863,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Ldr(temp, MemOperand(src_ptr, char_size * 4, PostIndex));
__ Str(temp, MemOperand(dst_ptr, char_size * 4, PostIndex));
temps.Release(temp);
- __ B(ge, &loop, /* far_target */ false);
+ __ B(ge, &loop, /* is_far_target= */ false);
__ Adds(num_chr, num_chr, 4);
- __ B(eq, final_label, /* far_target */ false);
+ __ B(eq, final_label, /* is_far_target= */ false);
// Main loop for < 4 character case and remainder handling. Loads and stores one
// 16-bit Java character at a time.
@@ -2875,7 +2876,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Subs(num_chr, num_chr, 1);
__ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
temps.Release(temp);
- __ B(gt, &remainder, /* far_target */ false);
+ __ B(gt, &remainder, /* is_far_target= */ false);
if (mirror::kUseStringCompression) {
__ B(final_label);
@@ -2891,7 +2892,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke)
__ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex));
temps.Release(temp);
__ Subs(num_chr, num_chr, 1);
- __ B(gt, &compressed_string_loop, /* far_target */ false);
+ __ B(gt, &compressed_string_loop, /* is_far_target= */ false);
}
if (done.IsReferenced()) {
@@ -2952,7 +2953,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMathCeil(HInvoke* invoke) {
void IntrinsicCodeGeneratorARMVIXL::VisitMathCeil(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
- __ Vrintp(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+ __ Vrintp(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
@@ -2964,7 +2965,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitMathFloor(HInvoke* invoke) {
void IntrinsicCodeGeneratorARMVIXL::VisitMathFloor(HInvoke* invoke) {
ArmVIXLAssembler* assembler = GetAssembler();
DCHECK(codegen_->GetInstructionSetFeatures().HasARMv8AInstructions());
- __ Vrintm(F64, F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
+ __ Vrintm(F64, OutputDRegister(invoke), InputDRegisterAt(invoke, 0));
}
void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
@@ -3011,7 +3012,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
__ Add(out, in, -info.low);
__ Cmp(out, info.length);
vixl32::Label allocate, done;
- __ B(hs, &allocate, /* is_far_target */ false);
+ __ B(hs, &allocate, /* is_far_target= */ false);
// If the value is within the bounds, load the j.l.Integer directly from the array.
codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
@@ -3044,7 +3045,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitThreadInterrupted(HInvoke* invoke) {
vixl32::Register temp = temps.Acquire();
vixl32::Label done;
vixl32::Label* const final_label = codegen_->GetFinalLabel(invoke, &done);
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false);
+ __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false);
__ Dmb(vixl32::ISH);
__ Mov(temp, 0);
assembler->StoreToOffset(kStoreWord, temp, tr, offset);
@@ -3066,6 +3067,9 @@ UNIMPLEMENTED_INTRINSIC(ARMVIXL, MathRoundDouble) // Could be done by changing
UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(ARMVIXL, SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 2ca12b6533..3da0e578bf 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -185,7 +185,7 @@ void IntrinsicLocationsBuilderMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invo
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// int java.lang.Float.floatToRawIntBits(float)
@@ -194,7 +194,7 @@ void IntrinsicLocationsBuilderMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke)
}
void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -226,7 +226,7 @@ void IntrinsicLocationsBuilderMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke)
}
void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// float java.lang.Float.intBitsToFloat(int)
@@ -235,7 +235,7 @@ void IntrinsicLocationsBuilderMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator,
@@ -411,7 +411,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
DataType::Type::kInt32,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ false,
+ /* reverseBits= */ false,
GetAssembler());
}
@@ -425,7 +425,7 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) {
DataType::Type::kInt64,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ false,
+ /* reverseBits= */ false,
GetAssembler());
}
@@ -439,7 +439,7 @@ void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) {
DataType::Type::kInt16,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ false,
+ /* reverseBits= */ false,
GetAssembler());
}
@@ -479,7 +479,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* in
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
}
// int java.lang.Long.numberOfLeadingZeros(long i)
@@ -488,7 +488,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invok
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
}
static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -566,7 +566,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* i
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
}
// int java.lang.Long.numberOfTrailingZeros(long i)
@@ -575,7 +575,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invo
}
void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
}
// int java.lang.Integer.reverse(int)
@@ -588,7 +588,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) {
DataType::Type::kInt32,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ true,
+ /* reverseBits= */ true,
GetAssembler());
}
@@ -602,7 +602,7 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) {
DataType::Type::kInt64,
IsR2OrNewer(),
IsR6(),
- /* reverseBits */ true,
+ /* reverseBits= */ true,
GetAssembler());
}
@@ -1055,11 +1055,11 @@ static void GenUnsafeGet(HInvoke* invoke,
codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
trg_loc,
base,
- /* offset */ 0U,
- /* index */ offset_loc,
+ /* offset= */ 0U,
+ /* index= */ offset_loc,
TIMES_1,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
if (is_volatile) {
__ Sync(0);
}
@@ -1077,8 +1077,8 @@ static void GenUnsafeGet(HInvoke* invoke,
trg_loc,
trg_loc,
base_loc,
- /* offset */ 0U,
- /* index */ offset_loc);
+ /* offset= */ 0U,
+ /* index= */ offset_loc);
}
} else {
if (is_R6) {
@@ -1107,7 +1107,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, IsR6(), codegen_);
}
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -1116,7 +1116,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, IsR6(), codegen_);
}
// long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -1125,7 +1125,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, IsR6(), codegen_);
}
// Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -1134,7 +1134,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, IsR6(), codegen_);
}
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -1143,7 +1143,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke
}
void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, IsR6(), codegen_);
}
static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1225,8 +1225,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1239,8 +1239,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
IsR6(),
codegen_);
}
@@ -1253,8 +1253,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1267,8 +1267,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1281,8 +1281,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke)
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
IsR6(),
codegen_);
}
@@ -1295,8 +1295,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1309,8 +1309,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
IsR6(),
codegen_);
}
@@ -1323,8 +1323,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
IsR6(),
codegen_);
}
@@ -1388,12 +1388,12 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS* code
invoke,
out_loc, // Unused, used only as a "temporary" within the read barrier.
base,
- /* offset */ 0u,
- /* index */ offset_loc,
+ /* offset= */ 0u,
+ /* index= */ offset_loc,
ScaleFactor::TIMES_1,
temp,
- /* needs_null_check */ false,
- /* always_update_field */ true);
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true);
}
}
@@ -1714,7 +1714,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_);
+ GenerateStringIndexOf(invoke, /* start_at_zero= */ true, GetAssembler(), codegen_);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1735,7 +1735,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_);
+ GenerateStringIndexOf(invoke, /* start_at_zero= */ false, GetAssembler(), codegen_);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -2704,6 +2704,10 @@ UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateByteBuffer)
+
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferAppend);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index cbe3b42cbf..3e687652d3 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -169,7 +169,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* in
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// int java.lang.Float.floatToRawIntBits(float)
@@ -178,7 +178,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invok
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -205,7 +205,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invok
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// float java.lang.Float.intBitsToFloat(int)
@@ -214,7 +214,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke)
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -295,7 +295,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke*
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
// int java.lang.Long.numberOfLeadingZeros(long i)
@@ -304,7 +304,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* inv
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
static void GenNumberOfTrailingZeroes(LocationSummary* locations,
@@ -332,7 +332,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke*
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
// int java.lang.Long.numberOfTrailingZeros(long i)
@@ -341,7 +341,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* in
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
static void GenReverse(LocationSummary* locations,
@@ -911,11 +911,11 @@ static void GenUnsafeGet(HInvoke* invoke,
codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
trg_loc,
base,
- /* offset */ 0U,
- /* index */ offset_loc,
+ /* offset= */ 0U,
+ /* index= */ offset_loc,
TIMES_1,
temp,
- /* needs_null_check */ false);
+ /* needs_null_check= */ false);
if (is_volatile) {
__ Sync(0);
}
@@ -928,8 +928,8 @@ static void GenUnsafeGet(HInvoke* invoke,
trg_loc,
trg_loc,
base_loc,
- /* offset */ 0U,
- /* index */ offset_loc);
+ /* offset= */ 0U,
+ /* index= */ offset_loc);
}
} else {
__ Lwu(trg, TMP, 0);
@@ -952,7 +952,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
@@ -961,7 +961,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
// long sun.misc.Unsafe.getLong(Object o, long offset)
@@ -970,7 +970,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
@@ -979,7 +979,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
// Object sun.misc.Unsafe.getObject(Object o, long offset)
@@ -988,7 +988,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
@@ -997,7 +997,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invo
}
void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -1067,8 +1067,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1080,8 +1080,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
@@ -1093,8 +1093,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt32,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1106,8 +1106,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1119,8 +1119,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invok
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
@@ -1132,8 +1132,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invo
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kReference,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1145,8 +1145,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ false,
+ /* is_volatile= */ false,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1158,8 +1158,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke)
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ false,
- /* is_ordered */ true,
+ /* is_volatile= */ false,
+ /* is_ordered= */ true,
codegen_);
}
@@ -1171,8 +1171,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke
void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
GenUnsafePut(invoke->GetLocations(),
DataType::Type::kInt64,
- /* is_volatile */ true,
- /* is_ordered */ false,
+ /* is_volatile= */ true,
+ /* is_ordered= */ false,
codegen_);
}
@@ -1234,12 +1234,12 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS64* co
invoke,
out_loc, // Unused, used only as a "temporary" within the read barrier.
base,
- /* offset */ 0u,
- /* index */ offset_loc,
+ /* offset= */ 0u,
+ /* index= */ offset_loc,
ScaleFactor::TIMES_1,
temp,
- /* needs_null_check */ false,
- /* always_update_field */ true);
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true);
}
}
@@ -1556,7 +1556,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
// int java.lang.String.indexOf(int ch, int fromIndex)
@@ -1574,7 +1574,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
@@ -1675,7 +1675,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
// boolean java.lang.Double.isInfinite(double)
@@ -1684,7 +1684,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
- GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
@@ -2354,6 +2354,9 @@ void IntrinsicCodeGeneratorMIPS64::VisitReachabilityFence(HInvoke* invoke ATTRIB
UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 3b23798758..de697f0f96 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -223,31 +223,31 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86Assembler*
}
void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ true);
}
void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ true);
}
void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false);
+ CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ false);
}
void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false);
+ CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ false);
}
void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -545,6 +545,96 @@ static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorX86* codegen, QuickEntry
__ cfi().AdjustCFAOffset(-16);
}
+static void CreateLowestOneBitLocations(ArenaAllocator* allocator, bool is_long, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+ if (is_long) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ } else {
+ locations->SetInAt(0, Location::Any());
+ }
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+static void GenLowestOneBit(X86Assembler* assembler,
+ CodeGeneratorX86* codegen,
+ bool is_long,
+ HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Location src = locations->InAt(0);
+ Location out_loc = locations->Out();
+
+ if (invoke->InputAt(0)->IsConstant()) {
+ // Evaluate this at compile time.
+ int64_t value = Int64FromConstant(invoke->InputAt(0)->AsConstant());
+ if (value == 0) {
+ if (is_long) {
+ __ xorl(out_loc.AsRegisterPairLow<Register>(), out_loc.AsRegisterPairLow<Register>());
+ __ xorl(out_loc.AsRegisterPairHigh<Register>(), out_loc.AsRegisterPairHigh<Register>());
+ } else {
+ __ xorl(out_loc.AsRegister<Register>(), out_loc.AsRegister<Register>());
+ }
+ return;
+ }
+ // Nonzero value.
+ value = is_long ? CTZ(static_cast<uint64_t>(value))
+ : CTZ(static_cast<uint32_t>(value));
+ if (is_long) {
+ if (value >= 32) {
+        int shift = value - 32;
+ codegen->Load32BitValue(out_loc.AsRegisterPairLow<Register>(), 0);
+ codegen->Load32BitValue(out_loc.AsRegisterPairHigh<Register>(), 1 << shift);
+ } else {
+ codegen->Load32BitValue(out_loc.AsRegisterPairLow<Register>(), 1 << value);
+ codegen->Load32BitValue(out_loc.AsRegisterPairHigh<Register>(), 0);
+ }
+ } else {
+ codegen->Load32BitValue(out_loc.AsRegister<Register>(), 1 << value);
+ }
+ return;
+ }
+  // Handle the non-constant case.
+ if (is_long) {
+ DCHECK(src.IsRegisterPair());
+ Register src_lo = src.AsRegisterPairLow<Register>();
+ Register src_hi = src.AsRegisterPairHigh<Register>();
+
+ Register out_lo = out_loc.AsRegisterPairLow<Register>();
+ Register out_hi = out_loc.AsRegisterPairHigh<Register>();
+
+ __ movl(out_lo, src_lo);
+ __ movl(out_hi, src_hi);
+
+ __ negl(out_lo);
+ __ adcl(out_hi, Immediate(0));
+ __ negl(out_hi);
+
+ __ andl(out_lo, src_lo);
+ __ andl(out_hi, src_hi);
+ } else {
+ if (codegen->GetInstructionSetFeatures().HasAVX2() && src.IsRegister()) {
+ Register out = out_loc.AsRegister<Register>();
+ __ blsi(out, src.AsRegister<Register>());
+ } else {
+ Register out = out_loc.AsRegister<Register>();
+      // Compute src & -src to isolate the lowest set bit.
+ if (src.IsRegister()) {
+ __ movl(out, src.AsRegister<Register>());
+ } else {
+ DCHECK(src.IsStackSlot());
+ __ movl(out, Address(ESP, src.GetStackIndex()));
+ }
+ __ negl(out);
+
+ if (src.IsRegister()) {
+ __ andl(out, src.AsRegister<Register>());
+ } else {
+ __ andl(out, Address(ESP, src.GetStackIndex()));
+ }
+ }
+ }
+}
+
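The non-constant path relies on the standard v & -v identity: two's-complement negation flips every bit above the lowest set bit, so ANDing with the original value isolates that bit. A minimal scalar sketch of the 32-bit case (illustrative only, not ART code):

    #include <cstdint>

    // Scalar reference for Integer.lowestOneBit: the lowest set bit of v,
    // or 0 when v == 0. Mirrors the movl/negl/andl sequence (or one BLSI).
    static uint32_t LowestOneBit32(uint32_t v) {
      return v & (0u - v);  // v & -v keeps only the least significant set bit.
    }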
void IntrinsicLocationsBuilderX86::VisitMathCos(HInvoke* invoke) {
CreateFPToFPCallLocations(allocator_, invoke);
}
@@ -657,6 +747,21 @@ void IntrinsicCodeGeneratorX86::VisitMathTanh(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
+void IntrinsicLocationsBuilderX86::VisitIntegerLowestOneBit(HInvoke* invoke) {
+ CreateLowestOneBitLocations(allocator_, /*is_long=*/ false, invoke);
+}
+void IntrinsicCodeGeneratorX86::VisitIntegerLowestOneBit(HInvoke* invoke) {
+ GenLowestOneBit(GetAssembler(), codegen_, /*is_long=*/ false, invoke);
+}
+
+void IntrinsicLocationsBuilderX86::VisitLongLowestOneBit(HInvoke* invoke) {
+ CreateLowestOneBitLocations(allocator_, /*is_long=*/ true, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitLongLowestOneBit(HInvoke* invoke) {
+ GenLowestOneBit(GetAssembler(), codegen_, /*is_long=*/ true, invoke);
+}
+
static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
@@ -1220,19 +1325,19 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
}
void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1594,7 +1699,7 @@ static void GenUnsafeGet(HInvoke* invoke,
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
- invoke, output_loc, base, src, /* needs_null_check */ false);
+ invoke, output_loc, base, src, /* needs_null_check= */ false);
} else {
__ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
codegen->GenerateReadBarrierSlow(
@@ -1665,45 +1770,45 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true);
+ CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false);
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true);
+ allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ true);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
@@ -1730,39 +1835,39 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator
void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
CreateIntIntIntIntToVoidPlusTempsLocations(
- allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true);
+ allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ true);
}
// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
@@ -1814,34 +1919,34 @@ static void GenUnsafePut(LocationSummary* locations,
}
void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -1938,8 +2043,8 @@ static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codeg
temp1_loc, // Unused, used only as a "temporary" within the read barrier.
base,
field_addr,
- /* needs_null_check */ false,
- /* always_update_field */ true,
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true,
&temp2);
}
@@ -2170,19 +2275,19 @@ static void GenBitCount(X86Assembler* assembler,
}
void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) {
- CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) {
- CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true);
+ CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2274,19 +2379,19 @@ static void GenLeadingZeros(X86Assembler* assembler,
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true);
+ CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) {
@@ -2365,19 +2470,19 @@ static void GenTrailingZeros(X86Assembler* assembler,
}
void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ false);
}
void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true);
+ CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ true);
}
void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) {
@@ -2585,11 +2690,11 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
     // Bail out if the source is not a non-primitive array.
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(temp1, temp1);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2622,7 +2727,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
       // Bail out if the destination is not a non-primitive array.
@@ -2634,7 +2739,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
       // temporaries such as `temp1`.
// /* HeapReference<Class> */ temp2 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp2_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(temp2, temp2);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp2` has been unpoisoned
@@ -2647,7 +2752,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
// Note: if heap poisoning is on, we are comparing two unpoisoned references here.
__ cmpl(temp1, temp2);
@@ -2656,7 +2761,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
__ j(kEqual, &do_copy);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
// comparison with null below, and this reference is not
@@ -2710,10 +2815,10 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(temp1, temp1);
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `temp1` has been unpoisoned
@@ -2846,7 +2951,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -2973,8 +3078,9 @@ UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 0469b02129..e79c0c9adf 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -162,10 +162,10 @@ void IntrinsicLocationsBuilderX86_64::VisitDoubleLongBitsToDouble(HInvoke* invok
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
}
void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
@@ -176,10 +176,10 @@ void IntrinsicLocationsBuilderX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke)
}
void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
- MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
void IntrinsicCodeGeneratorX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
- MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler());
+ MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
}
static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -430,12 +430,12 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
// direct x86 instruction, since NaN should map to 0 and large positive
// values need to be clipped to the extreme value.
codegen_->Load64BitValue(out, kPrimLongMax);
- __ cvtsi2sd(t2, out, /* is64bit */ true);
+ __ cvtsi2sd(t2, out, /* is64bit= */ true);
__ comisd(t1, t2);
__ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered
__ movl(out, Immediate(0)); // does not change flags, implicit zero extension to 64-bit
__ j(kUnordered, &done); // NaN mapped to 0 (just moved in out)
- __ cvttsd2si(out, t1, /* is64bit */ true);
+ __ cvttsd2si(out, t1, /* is64bit= */ true);
__ Bind(&done);
}
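The sequence above corresponds to the following scalar clamping behaviour, sketched here under the assumption that the input already holds the rounding-adjusted value (illustrative C++, not ART code):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Tail of Math.round(double): NaN maps to 0, values at or above the
    // largest long clamp to Long.MAX_VALUE, everything else truncates
    // (the cvttsd2si step).
    static int64_t ClampRoundedDouble(double adjusted) {
      constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
      if (std::isnan(adjusted)) return 0;
      if (adjusted >= static_cast<double>(kMax)) return kMax;
      return static_cast<int64_t>(adjusted);
    }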
@@ -979,7 +979,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
// Register `temp1` is not trashed by the read barrier emitted
// by GenerateFieldLoadWithBakerReadBarrier below, as that
// method produces a call to a ReadBarrierMarkRegX entry point,
@@ -987,7 +987,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
     // temporaries such as `temp1`.
// /* HeapReference<Class> */ temp2 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp2_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false);
// If heap poisoning is enabled, `temp1` and `temp2` have been
     // unpoisoned by the previous calls to
// GenerateFieldLoadWithBakerReadBarrier.
@@ -1011,7 +1011,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ TMP = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1034,7 +1034,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ TMP = temp2->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, TMP_loc, temp2, component_offset, /* needs_null_check */ false);
+ invoke, TMP_loc, temp2, component_offset, /* needs_null_check= */ false);
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
// If heap poisoning is enabled, `TMP` has been unpoisoned by
@@ -1058,7 +1058,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
// We do not need to emit a read barrier for the following
// heap reference load, as `temp1` is only used in a
// comparison with null below, and this reference is not
@@ -1086,10 +1086,10 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, temp1_loc, src, class_offset, /* needs_null_check */ false);
+ invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
// /* HeapReference<Class> */ TMP = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
- invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false);
+ invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
__ testl(CpuRegister(TMP), CpuRegister(TMP));
__ j(kEqual, intrinsic_slow_path->GetEntryLabel());
} else {
@@ -1198,7 +1198,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
}
// We only need one card marking on the destination array.
- codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null */ false);
+ codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null= */ false);
__ Bind(intrinsic_slow_path->GetExitLabel());
}
@@ -1452,7 +1452,7 @@ static void GenerateStringIndexOf(HInvoke* invoke,
// Ensure we have a start index >= 0;
__ xorl(counter, counter);
__ cmpl(start_index, Immediate(0));
- __ cmov(kGreater, counter, start_index, /* is64bit */ false); // 32-bit copy is enough.
+ __ cmov(kGreater, counter, start_index, /* is64bit= */ false); // 32-bit copy is enough.
if (mirror::kUseStringCompression) {
NearLabel modify_counter, offset_uncompressed_label;
@@ -1514,19 +1514,19 @@ static void GenerateStringIndexOf(HInvoke* invoke,
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false);
+ CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) {
- GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false);
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
@@ -1840,7 +1840,7 @@ void IntrinsicLocationsBuilderX86_64::VisitThreadCurrentThread(HInvoke* invoke)
void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) {
CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64PointerSize>(),
- /* no_rip */ true));
+ /* no_rip= */ true));
}
static void GenUnsafeGet(HInvoke* invoke,
@@ -1866,7 +1866,7 @@ static void GenUnsafeGet(HInvoke* invoke,
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
- invoke, output_loc, base, src, /* needs_null_check */ false);
+ invoke, output_loc, base, src, /* needs_null_check= */ false);
} else {
__ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0));
codegen->GenerateReadBarrierSlow(
@@ -1930,22 +1930,22 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invo
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGet(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLong(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObject(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
- GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
@@ -2028,34 +2028,34 @@ static void GenUnsafePut(LocationSummary* locations, DataType::Type type, bool i
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePut(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObject(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
GenUnsafePut(
- invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_);
+ invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLong(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
- GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_);
+ GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
}
static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
@@ -2140,8 +2140,8 @@ static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86_64* co
out_loc, // Unused, used only as a "temporary" within the read barrier.
base,
field_addr,
- /* needs_null_check */ false,
- /* always_update_field */ true,
+ /* needs_null_check= */ false,
+ /* always_update_field= */ true,
&temp1,
&temp2);
}
@@ -2369,7 +2369,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerBitCount(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
@@ -2377,7 +2377,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) {
- GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) {
@@ -2421,93 +2421,98 @@ static void GenOneBit(X86_64Assembler* assembler,
}
// Handle the non-constant cases.
- CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
- if (is_high) {
- // Use architectural support: basically 1 << bsr.
- if (src.IsRegister()) {
+ if (!is_high && codegen->GetInstructionSetFeatures().HasAVX2() &&
+ src.IsRegister()) {
+ __ blsi(out, src.AsRegister<CpuRegister>());
+ } else {
+ CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ if (is_high) {
+ // Use architectural support: basically 1 << bsr.
+ if (src.IsRegister()) {
+ if (is_long) {
+ __ bsrq(tmp, src.AsRegister<CpuRegister>());
+ } else {
+ __ bsrl(tmp, src.AsRegister<CpuRegister>());
+ }
+ } else if (is_long) {
+ DCHECK(src.IsDoubleStackSlot());
+ __ bsrq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ } else {
+ DCHECK(src.IsStackSlot());
+ __ bsrl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ }
+ // BSR sets ZF if the input was zero.
+ NearLabel is_zero, done;
+ __ j(kEqual, &is_zero);
+ __ movl(out, Immediate(1)); // Clears upper bits too.
if (is_long) {
- __ bsrq(tmp, src.AsRegister<CpuRegister>());
+ __ shlq(out, tmp);
} else {
- __ bsrl(tmp, src.AsRegister<CpuRegister>());
+ __ shll(out, tmp);
}
- } else if (is_long) {
- DCHECK(src.IsDoubleStackSlot());
- __ bsrq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- } else {
- DCHECK(src.IsStackSlot());
- __ bsrl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- }
- // BSR sets ZF if the input was zero.
- NearLabel is_zero, done;
- __ j(kEqual, &is_zero);
- __ movl(out, Immediate(1)); // Clears upper bits too.
- if (is_long) {
- __ shlq(out, tmp);
- } else {
- __ shll(out, tmp);
- }
- __ jmp(&done);
- __ Bind(&is_zero);
- __ xorl(out, out); // Clears upper bits too.
- __ Bind(&done);
- } else {
- // Copy input into temporary.
- if (src.IsRegister()) {
+ __ jmp(&done);
+ __ Bind(&is_zero);
+ __ xorl(out, out); // Clears upper bits too.
+ __ Bind(&done);
+ } else {
+ // Copy input into temporary.
+ if (src.IsRegister()) {
+ if (is_long) {
+ __ movq(tmp, src.AsRegister<CpuRegister>());
+ } else {
+ __ movl(tmp, src.AsRegister<CpuRegister>());
+ }
+ } else if (is_long) {
+ DCHECK(src.IsDoubleStackSlot());
+ __ movq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ } else {
+ DCHECK(src.IsStackSlot());
+ __ movl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ }
+ // Do the bit twiddling: basically tmp & -tmp;
if (is_long) {
- __ movq(tmp, src.AsRegister<CpuRegister>());
+ __ movq(out, tmp);
+ __ negq(tmp);
+ __ andq(out, tmp);
} else {
- __ movl(tmp, src.AsRegister<CpuRegister>());
+ __ movl(out, tmp);
+ __ negl(tmp);
+ __ andl(out, tmp);
}
- } else if (is_long) {
- DCHECK(src.IsDoubleStackSlot());
- __ movq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- } else {
- DCHECK(src.IsStackSlot());
- __ movl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- }
- // Do the bit twiddling: basically tmp & -tmp;
- if (is_long) {
- __ movq(out, tmp);
- __ negq(tmp);
- __ andq(out, tmp);
- } else {
- __ movl(out, tmp);
- __ negl(tmp);
- __ andl(out, tmp);
}
}
}
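The reorganized GenOneBit keeps two non-constant strategies: BLSI when the feature bit is reported and the input is in a register, and otherwise the generic 1 << bsr (highest bit) or tmp & -tmp (lowest bit) sequences. A scalar sketch of the highest-one-bit path, using a compiler builtin in place of BSR (illustrative only, not ART code):

    #include <cstdint>

    // Long.highestOneBit: 1 << bsr(v) for nonzero v, 0 otherwise.
    // __builtin_clzll stands in for the BSR instruction.
    static uint64_t HighestOneBit64(uint64_t v) {
      if (v == 0) return 0;  // BSR sets ZF for a zero input; the code xors out.
      unsigned msb = 63u - static_cast<unsigned>(__builtin_clzll(v));
      return uint64_t{1} << msb;
    }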
void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ false);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ true);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ true);
}
void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ true);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ false);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
- CreateOneBitLocations(allocator_, invoke, /* is_high */ false);
+ CreateOneBitLocations(allocator_, invoke, /* is_high= */ false);
}
void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) {
- GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true);
+ GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ true);
}
static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2572,7 +2577,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke*
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
@@ -2580,7 +2585,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* inv
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) {
@@ -2640,7 +2645,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke*
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false);
}
void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
@@ -2648,7 +2653,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* in
}
void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
- GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true);
+ GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true);
}
void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
@@ -2719,7 +2724,7 @@ void IntrinsicCodeGeneratorX86_64::VisitThreadInterrupted(HInvoke* invoke) {
X86_64Assembler* assembler = GetAssembler();
CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>();
Address address = Address::Absolute
- (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip */ true);
+ (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip= */ true);
NearLabel done;
__ gs()->movl(out, address);
__ testl(out, out);
@@ -2740,6 +2745,9 @@ void IntrinsicCodeGeneratorX86_64::VisitReachabilityFence(HInvoke* invoke ATTRIB
UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32Update)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateBytes)
+UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateByteBuffer)
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 7f71745a43..b33d0f488e 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -692,7 +692,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
VisitSetLocation(instruction, idx, instruction->InputAt(2));
}
- void VisitDeoptimize(HDeoptimize* instruction) {
+ void VisitDeoptimize(HDeoptimize* instruction) override {
const ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
for (HInstruction* heap_value : heap_values) {
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 7d66155b39..12b180d5ff 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -351,7 +351,10 @@ static bool HasReductionFormat(HInstruction* reduction, HInstruction* phi) {
// Translates vector operation to reduction kind.
static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
- if (reduction->IsVecAdd() || reduction->IsVecSub() || reduction->IsVecSADAccumulate()) {
+ if (reduction->IsVecAdd() ||
+ reduction->IsVecSub() ||
+ reduction->IsVecSADAccumulate() ||
+ reduction->IsVecDotProd()) {
return HVecReduce::kSum;
}
LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
@@ -431,6 +434,23 @@ static void PeelByCount(HLoopInformation* loop_info, int count) {
}
}
+// Returns the narrower of the types of instructions a and b, looking through type conversions.
+static DataType::Type GetNarrowerType(HInstruction* a, HInstruction* b) {
+ DataType::Type type = a->GetType();
+ if (DataType::Size(b->GetType()) < DataType::Size(type)) {
+ type = b->GetType();
+ }
+ if (a->IsTypeConversion() &&
+ DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(type)) {
+ type = a->InputAt(0)->GetType();
+ }
+ if (b->IsTypeConversion() &&
+ DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(type)) {
+ type = b->InputAt(0)->GetType();
+ }
+ return type;
+}
+
//
// Public methods.
//
@@ -1289,6 +1309,7 @@ bool HLoopOptimization::VectorizeDef(LoopNode* node,
DataType::Type type = instruction->GetType();
// Recognize SAD idiom or direct reduction.
if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) ||
+ VectorizeDotProdIdiom(node, instruction, generate_code, type, restrictions) ||
(TrySetVectorType(type, &restrictions) &&
VectorizeUse(node, instruction, generate_code, type, restrictions))) {
if (generate_code) {
@@ -1531,11 +1552,11 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |= kNoDiv | kNoReduction;
+ *restrictions |= kNoDiv | kNoReduction | kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction;
+ *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoDotProd;
return TrySetVectorLength(4);
case DataType::Type::kInt32:
*restrictions |= kNoDiv | kNoWideSAD;
@@ -1580,12 +1601,23 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |=
- kNoMul | kNoDiv | kNoShift | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+ *restrictions |= kNoMul |
+ kNoDiv |
+ kNoShift |
+ kNoAbs |
+ kNoSignedHAdd |
+ kNoUnroundedHAdd |
+ kNoSAD |
+ kNoDotProd;
return TrySetVectorLength(16);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoAbs | kNoSignedHAdd | kNoUnroundedHAdd | kNoSAD;
+ *restrictions |= kNoDiv |
+ kNoAbs |
+ kNoSignedHAdd |
+ kNoUnroundedHAdd |
+                       kNoSAD |
+ kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
*restrictions |= kNoDiv | kNoSAD;
@@ -1610,11 +1642,11 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoDotProd;
return TrySetVectorLength(16);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoStringCharAt;
+ *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
*restrictions |= kNoDiv;
@@ -1639,11 +1671,11 @@ bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrict
case DataType::Type::kBool:
case DataType::Type::kUint8:
case DataType::Type::kInt8:
- *restrictions |= kNoDiv;
+ *restrictions |= kNoDiv | kNoDotProd;
return TrySetVectorLength(16);
case DataType::Type::kUint16:
case DataType::Type::kInt16:
- *restrictions |= kNoDiv | kNoStringCharAt;
+ *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
return TrySetVectorLength(8);
case DataType::Type::kInt32:
*restrictions |= kNoDiv;
@@ -2071,18 +2103,7 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
HInstruction* r = a;
HInstruction* s = b;
bool is_unsigned = false;
- DataType::Type sub_type = a->GetType();
- if (DataType::Size(b->GetType()) < DataType::Size(sub_type)) {
- sub_type = b->GetType();
- }
- if (a->IsTypeConversion() &&
- DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
- sub_type = a->InputAt(0)->GetType();
- }
- if (b->IsTypeConversion() &&
- DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(sub_type)) {
- sub_type = b->InputAt(0)->GetType();
- }
+ DataType::Type sub_type = GetNarrowerType(a, b);
if (reduction_type != sub_type &&
(!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
return false;
@@ -2123,6 +2144,75 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
return false;
}
+// Recognizes the following dot product idiom:
+//   q += a * b for operands a, b whose type is narrower than the reduction type,
+// provided that the operands have the same type or are promoted to a wider form.
+// Since this may involve a vector length change, the idiom is handled by going directly
+// to a dot product node (rather than relying on combining finer-grained nodes later).
+bool HLoopOptimization::VectorizeDotProdIdiom(LoopNode* node,
+ HInstruction* instruction,
+ bool generate_code,
+ DataType::Type reduction_type,
+ uint64_t restrictions) {
+ if (!instruction->IsAdd() || (reduction_type != DataType::Type::kInt32)) {
+ return false;
+ }
+
+ HInstruction* q = instruction->InputAt(0);
+ HInstruction* v = instruction->InputAt(1);
+ if (!v->IsMul() || v->GetType() != reduction_type) {
+ return false;
+ }
+
+ HInstruction* a = v->InputAt(0);
+ HInstruction* b = v->InputAt(1);
+ HInstruction* r = a;
+ HInstruction* s = b;
+ DataType::Type op_type = GetNarrowerType(a, b);
+ bool is_unsigned = false;
+
+ if (!IsNarrowerOperands(a, b, op_type, &r, &s, &is_unsigned)) {
+ return false;
+ }
+ op_type = HVecOperation::ToProperType(op_type, is_unsigned);
+
+ if (!TrySetVectorType(op_type, &restrictions) ||
+ HasVectorRestrictions(restrictions, kNoDotProd)) {
+ return false;
+ }
+
+ DCHECK(r != nullptr && s != nullptr);
+ // Accept dot product idiom for vectorizable operands. Vectorized code uses the shorthand
+ // idiomatic operation. Sequential code uses the original scalar expressions.
+ if (generate_code && vector_mode_ != kVector) { // de-idiom
+ r = a;
+ s = b;
+ }
+ if (VectorizeUse(node, q, generate_code, op_type, restrictions) &&
+ VectorizeUse(node, r, generate_code, op_type, restrictions) &&
+ VectorizeUse(node, s, generate_code, op_type, restrictions)) {
+ if (generate_code) {
+ if (vector_mode_ == kVector) {
+ vector_map_->Put(instruction, new (global_allocator_) HVecDotProd(
+ global_allocator_,
+ vector_map_->Get(q),
+ vector_map_->Get(r),
+ vector_map_->Get(s),
+ reduction_type,
+ is_unsigned,
+ GetOtherVL(reduction_type, op_type, vector_length_),
+ kNoDexPc));
+ MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
+ } else {
+ GenerateVecOp(v, vector_map_->Get(r), vector_map_->Get(s), reduction_type);
+ GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
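For illustration, a minimal C++ sketch (not part of the patch; the real input is dex bytecode) of the kind of reduction loop this recognizer targets:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical loop matched by the idiom above: int8_t multiply operands feeding an
    // int32_t accumulator, i.e. q += a[i] * b[i] with operands narrower than the reduction.
    int32_t DotProduct(const int8_t* a, const int8_t* b, size_t n) {
      int32_t q = 0;
      for (size_t i = 0; i != n; ++i) {
        q += a[i] * b[i];
      }
      return q;
    }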
//
// Vectorization heuristics.
//
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 2b202fda75..1a842c4bf3 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -82,6 +82,7 @@ class HLoopOptimization : public HOptimization {
kNoReduction = 1 << 9, // no reduction
kNoSAD = 1 << 10, // no sum of absolute differences (SAD)
kNoWideSAD = 1 << 11, // no sum of absolute differences (SAD) with operand widening
+ kNoDotProd = 1 << 12, // no dot product
};
/*
@@ -217,6 +218,11 @@ class HLoopOptimization : public HOptimization {
bool generate_code,
DataType::Type type,
uint64_t restrictions);
+ bool VectorizeDotProdIdiom(LoopNode* node,
+ HInstruction* instruction,
+ bool generate_code,
+ DataType::Type type,
+ uint64_t restrictions);
// Vectorization heuristics.
Alignment ComputeAlignment(HInstruction* offset,
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index c7cc661303..310d98b5b0 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -30,7 +30,7 @@ class LoopOptimizationTest : public OptimizingUnitTest {
: graph_(CreateGraph()),
iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
loop_opt_(new (GetAllocator()) HLoopOptimization(
- graph_, /* compiler_options */ nullptr, iva_, /* stats */ nullptr)) {
+ graph_, /* compiler_options= */ nullptr, iva_, /* stats= */ nullptr)) {
BuildGraph();
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 79a7e2c858..f7c16d1d02 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -20,6 +20,7 @@
#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/bit_vector-inl.h"
+#include "base/logging.h"
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "class_root.h"
@@ -43,7 +44,7 @@ void HGraph::InitializeInexactObjectRTI(VariableSizedHandleScope* handles) {
// Create the inexact Object reference type and store it in the HGraph.
inexact_object_rti_ = ReferenceTypeInfo::Create(
handles->NewHandle(GetClassRoot<mirror::Object>()),
- /* is_exact */ false);
+ /* is_exact= */ false);
}
void HGraph::AddBlock(HBasicBlock* block) {
@@ -59,7 +60,7 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
ScopedArenaAllocator allocator(GetArenaStack());
// Nodes that we're currently visiting, indexed by block id.
ArenaBitVector visiting(
- &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder);
+ &allocator, blocks_.size(), /* expandable= */ false, kArenaAllocGraphBuilder);
visiting.ClearAllBits();
// Number of successors visited from a given node, indexed by block id.
ScopedArenaVector<size_t> successors_visited(blocks_.size(),
@@ -688,7 +689,7 @@ HCurrentMethod* HGraph::GetCurrentMethod() {
}
const char* HGraph::GetMethodName() const {
- const DexFile::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
+ const dex::MethodId& method_id = dex_file_.GetMethodId(method_idx_);
return dex_file_.GetMethodName(method_id);
}
@@ -825,7 +826,7 @@ void HLoopInformation::Populate() {
ScopedArenaAllocator allocator(graph->GetArenaStack());
ArenaBitVector visited(&allocator,
graph->GetBlocks().size(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocGraphBuilder);
visited.ClearAllBits();
// Stop marking blocks at the loop header.
@@ -1230,7 +1231,7 @@ bool HInstructionList::FoundBefore(const HInstruction* instruction1,
}
}
LOG(FATAL) << "Did not find an order between two instructions of the same block.";
- return true;
+ UNREACHABLE();
}
bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const {
@@ -1253,7 +1254,7 @@ bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const {
} else {
// There is no order among phis.
LOG(FATAL) << "There is no dominance between phis of a same block.";
- return false;
+ UNREACHABLE();
}
} else {
// `this` is not a phi.
@@ -2526,7 +2527,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
current->SetGraph(outer_graph);
outer_graph->AddBlock(current);
outer_graph->reverse_post_order_[++index_of_at] = current;
- UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge */ false);
+ UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge= */ false);
}
}
@@ -2536,7 +2537,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
outer_graph->reverse_post_order_[++index_of_at] = to;
// Only `to` can become a back edge, as the inlined blocks
// are predecessors of `to`.
- UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true);
+ UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge= */ true);
// Update all predecessors of the exit block (now the `to` block)
// to not `HReturn` but `HGoto` instead. Special case throwing blocks
@@ -2710,13 +2711,13 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) {
DCHECK((old_pre_header->GetLoopInformation() == nullptr) ||
!old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header));
UpdateLoopAndTryInformationOfNewBlock(
- if_block, old_pre_header, /* replace_if_back_edge */ false);
+ if_block, old_pre_header, /* replace_if_back_edge= */ false);
UpdateLoopAndTryInformationOfNewBlock(
- true_block, old_pre_header, /* replace_if_back_edge */ false);
+ true_block, old_pre_header, /* replace_if_back_edge= */ false);
UpdateLoopAndTryInformationOfNewBlock(
- false_block, old_pre_header, /* replace_if_back_edge */ false);
+ false_block, old_pre_header, /* replace_if_back_edge= */ false);
UpdateLoopAndTryInformationOfNewBlock(
- new_pre_header, old_pre_header, /* replace_if_back_edge */ false);
+ new_pre_header, old_pre_header, /* replace_if_back_edge= */ false);
}
HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header,
@@ -3180,4 +3181,77 @@ std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind) {
}
}
+// Check that intrinsic enum values fit within the space set aside in ArtMethod modifier flags.
+#define CHECK_INTRINSICS_ENUM_VALUES(Name, InvokeType, _, SideEffects, Exceptions, ...) \
+ static_assert( \
+ static_cast<uint32_t>(Intrinsics::k ## Name) <= (kAccIntrinsicBits >> CTZ(kAccIntrinsicBits)), \
+ "Instrinsics enumeration space overflow.");
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(CHECK_INTRINSICS_ENUM_VALUES)
+#undef INTRINSICS_LIST
+#undef CHECK_INTRINSICS_ENUM_VALUES
+
+// Function that returns whether an intrinsic needs an environment or not.
+static inline IntrinsicNeedsEnvironmentOrCache NeedsEnvironmentOrCacheIntrinsic(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kNeedsEnvironmentOrCache; // Non-sensical for intrinsic.
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+ case Intrinsics::k ## Name: \
+ return NeedsEnvOrCache;
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kNeedsEnvironmentOrCache;
+}
+
+// Function that returns whether an intrinsic has side effects.
+static inline IntrinsicSideEffects GetSideEffectsIntrinsic(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kAllSideEffects;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+ case Intrinsics::k ## Name: \
+ return SideEffects;
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kAllSideEffects;
+}
+
+// Function that returns whether an intrinsic can throw exceptions.
+static inline IntrinsicExceptions GetExceptionsIntrinsic(Intrinsics i) {
+ switch (i) {
+ case Intrinsics::kNone:
+ return kCanThrow;
+#define OPTIMIZING_INTRINSICS(Name, InvokeType, NeedsEnvOrCache, SideEffects, Exceptions, ...) \
+ case Intrinsics::k ## Name: \
+ return Exceptions;
+#include "intrinsics_list.h"
+ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
+#undef INTRINSICS_LIST
+#undef OPTIMIZING_INTRINSICS
+ }
+ return kCanThrow;
+}
+
+void HInvoke::SetResolvedMethod(ArtMethod* method) {
+ // TODO: b/65872996 The intent is that polymorphic signature methods should
+ // be compiler intrinsics. At present, they are only interpreter intrinsics.
+ if (method != nullptr &&
+ method->IsIntrinsic() &&
+ !method->IsPolymorphicSignature()) {
+ Intrinsics intrinsic = static_cast<Intrinsics>(method->GetIntrinsic());
+ SetIntrinsic(intrinsic,
+ NeedsEnvironmentOrCacheIntrinsic(intrinsic),
+ GetSideEffectsIntrinsic(intrinsic),
+ GetExceptionsIntrinsic(intrinsic));
+ }
+ resolved_method_ = method;
+}
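The three helpers above all expand intrinsics_list.h through the same X-macro; a self-contained sketch of that pattern, with hypothetical names, is:

    // Self-contained sketch of the X-macro technique used above; names are hypothetical.
    #define EXAMPLE_INTRINSICS_LIST(V) \
      V(Foo, /* can_throw= */ true)    \
      V(Bar, /* can_throw= */ false)

    enum class ExampleIntrinsic {
      kNone,
    #define DECLARE_ENUM(Name, CanThrow) k##Name,
      EXAMPLE_INTRINSICS_LIST(DECLARE_ENUM)
    #undef DECLARE_ENUM
    };

    static constexpr bool ExampleCanThrow(ExampleIntrinsic i) {
      switch (i) {
        case ExampleIntrinsic::kNone:
          return true;  // Conservative default, mirroring kCanThrow above.
    #define CASE_CAN_THROW(Name, CanThrow) \
        case ExampleIntrinsic::k##Name:    \
          return CanThrow;
        EXAMPLE_INTRINSICS_LIST(CASE_CAN_THROW)
    #undef CASE_CAN_THROW
      }
      return true;
    }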
+
} // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 68f1a2406a..c70674b0ad 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -26,9 +26,11 @@
#include "base/arena_object.h"
#include "base/array_ref.h"
#include "base/iteration_range.h"
+#include "base/mutex.h"
#include "base/quasi_atomic.h"
#include "base/stl_util.h"
#include "base/transform_array_ref.h"
+#include "art_method.h"
#include "data_type.h"
#include "deoptimization_kind.h"
#include "dex/dex_file.h"
@@ -128,6 +130,7 @@ enum GraphAnalysisResult {
kAnalysisInvalidBytecode,
kAnalysisFailThrowCatchLoop,
kAnalysisFailAmbiguousArrayOp,
+ kAnalysisFailIrreducibleLoopAndStringInit,
kAnalysisSuccess,
};
@@ -314,6 +317,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
uint32_t method_idx,
InstructionSet instruction_set,
InvokeType invoke_type = kInvalidInvokeType,
+ bool dead_reference_safe = false,
bool debuggable = false,
bool osr = false,
int start_instruction_id = 0)
@@ -333,6 +337,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
has_simd_(false),
has_loops_(false),
has_irreducible_loops_(false),
+ dead_reference_safe_(dead_reference_safe),
debuggable_(debuggable),
current_instruction_id_(start_instruction_id),
dex_file_(dex_file),
@@ -523,6 +528,12 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
has_bounds_checks_ = value;
}
+ // Is the code known to be robust against eliminating dead references
+ // and the effects of early finalization?
+ bool IsDeadReferenceSafe() const { return dead_reference_safe_; }
+
+ void MarkDeadReferenceUnsafe() { dead_reference_safe_ = false; }
+
bool IsDebuggable() const { return debuggable_; }
// Returns a constant of the given type and value. If it does not exist
@@ -701,6 +712,14 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// so there might be false positives.
bool has_irreducible_loops_;
+ // Is the code known to be robust against eliminating dead references
+ // and the effects of early finalization? If false, dead reference variables
+ // are kept if they might be visible to the garbage collector.
+ // Currently this means that the class was declared to be dead-reference-safe,
+ // the method accesses no reachability-sensitive fields or data, and the same
+ // is true for any methods that were inlined into the current one.
+ bool dead_reference_safe_;
+
// Indicates whether the graph should be compiled in a way that
// ensures full debuggability. If false, we can apply more
// aggressive optimizations that may limit the level of debugging.
@@ -892,7 +911,7 @@ class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
explicit TryCatchInformation(const HTryBoundary& try_entry)
: try_entry_(&try_entry),
catch_dex_file_(nullptr),
- catch_type_index_(DexFile::kDexNoIndex16) {
+ catch_type_index_(dex::TypeIndex::Invalid()) {
DCHECK(try_entry_ != nullptr);
}
@@ -911,9 +930,9 @@ class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
bool IsCatchBlock() const { return catch_dex_file_ != nullptr; }
- bool IsCatchAllTypeIndex() const {
+ bool IsValidTypeIndex() const {
DCHECK(IsCatchBlock());
- return !catch_type_index_.IsValid();
+ return catch_type_index_.IsValid();
}
dex::TypeIndex GetCatchTypeIndex() const {
@@ -926,6 +945,10 @@ class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
return *catch_dex_file_;
}
+ void SetInvalidTypeIndex() {
+ catch_type_index_ = dex::TypeIndex::Invalid();
+ }
+
private:
// One of possibly several TryBoundary instructions entering the block's try.
// Only set for try blocks.
@@ -933,7 +956,7 @@ class TryCatchInformation : public ArenaObject<kArenaAllocTryCatchInfo> {
// Exception type information. Only set for catch blocks.
const DexFile* catch_dex_file_;
- const dex::TypeIndex catch_type_index_;
+ dex::TypeIndex catch_type_index_;
};
static constexpr size_t kNoLifetime = -1;
@@ -1453,6 +1476,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(VecSetScalars, VecOperation) \
M(VecMultiplyAccumulate, VecOperation) \
M(VecSADAccumulate, VecOperation) \
+ M(VecDotProd, VecOperation) \
M(VecLoad, VecMemoryOperation) \
M(VecStore, VecMemoryOperation) \
@@ -1494,6 +1518,14 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(X86PackedSwitch, Instruction)
#endif
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M) \
+ M(X86AndNot, Instruction) \
+ M(X86MaskOrResetLeastSetBit, Instruction)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
+#endif
+
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
#define FOR_EACH_CONCRETE_INSTRUCTION(M) \
@@ -1504,7 +1536,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M) \
FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
- FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M) \
+ FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
#define FOR_EACH_ABSTRACT_INSTRUCTION(M) \
M(Condition, BinaryOperation) \
@@ -3229,7 +3262,7 @@ class HDeoptimize final : public HVariableInputSizeInstruction {
SideEffects::All(),
dex_pc,
allocator,
- /* number_of_inputs */ 1,
+ /* number_of_inputs= */ 1,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(false);
SetPackedField<DeoptimizeKindField>(kind);
@@ -3254,7 +3287,7 @@ class HDeoptimize final : public HVariableInputSizeInstruction {
SideEffects::CanTriggerGC(),
dex_pc,
allocator,
- /* number_of_inputs */ 2,
+ /* number_of_inputs= */ 2,
kArenaAllocMisc) {
SetPackedFlag<kFieldCanBeMoved>(true);
SetPackedField<DeoptimizeKindField>(kind);
@@ -4322,7 +4355,7 @@ class HInvoke : public HVariableInputSizeInstruction {
bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
ArtMethod* GetResolvedMethod() const { return resolved_method_; }
- void SetResolvedMethod(ArtMethod* method) { resolved_method_ = method; }
+ void SetResolvedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
DECLARE_ABSTRACT_INSTRUCTION(Invoke);
@@ -4354,12 +4387,14 @@ class HInvoke : public HVariableInputSizeInstruction {
number_of_arguments + number_of_other_inputs,
kArenaAllocInvokeInputs),
number_of_arguments_(number_of_arguments),
- resolved_method_(resolved_method),
dex_method_index_(dex_method_index),
intrinsic_(Intrinsics::kNone),
intrinsic_optimizations_(0) {
SetPackedField<InvokeTypeField>(invoke_type);
SetPackedFlag<kFlagCanThrow>(true);
+ // Check mutator lock, constructors lack annotalysis support.
+ Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
+ SetResolvedMethod(resolved_method);
}
DEFAULT_COPY_CONSTRUCTOR(Invoke);
@@ -4384,7 +4419,7 @@ class HInvokeUnresolved final : public HInvoke {
: HInvoke(kInvokeUnresolved,
allocator,
number_of_arguments,
- 0u /* number_of_other_inputs */,
+ /* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
dex_method_index,
@@ -4410,7 +4445,7 @@ class HInvokePolymorphic final : public HInvoke {
: HInvoke(kInvokePolymorphic,
allocator,
number_of_arguments,
- 0u /* number_of_other_inputs */,
+ /* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
dex_method_index,
@@ -4436,11 +4471,11 @@ class HInvokeCustom final : public HInvoke {
: HInvoke(kInvokeCustom,
allocator,
number_of_arguments,
- /* number_of_other_inputs */ 0u,
+ /* number_of_other_inputs= */ 0u,
return_type,
dex_pc,
- /* dex_method_index */ dex::kDexNoIndex,
- /* resolved_method */ nullptr,
+ /* dex_method_index= */ dex::kDexNoIndex,
+ /* resolved_method= */ nullptr,
kStatic),
call_site_index_(call_site_index) {
}
@@ -4533,8 +4568,7 @@ class HInvokeStaticOrDirect final : public HInvoke {
allocator,
number_of_arguments,
// There is potentially one extra argument for the HCurrentMethod node, and
- // potentially one other if the clinit check is explicit, and potentially
- // one other if the method is a string factory.
+ // potentially one other if the clinit check is explicit.
(NeedsCurrentMethodInput(dispatch_info.method_load_kind) ? 1u : 0u) +
(clinit_check_requirement == ClinitCheckRequirement::kExplicit ? 1u : 0u),
return_type,
@@ -4845,10 +4879,11 @@ class HNeg final : public HUnaryOperation {
class HNewArray final : public HExpression<2> {
public:
- HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc)
+ HNewArray(HInstruction* cls, HInstruction* length, uint32_t dex_pc, size_t component_size_shift)
: HExpression(kNewArray, DataType::Type::kReference, SideEffects::CanTriggerGC(), dex_pc) {
SetRawInputAt(0, cls);
SetRawInputAt(1, length);
+ SetPackedField<ComponentSizeShiftField>(component_size_shift);
}
bool IsClonable() const override { return true; }
@@ -4870,10 +4905,23 @@ class HNewArray final : public HExpression<2> {
return InputAt(1);
}
+ size_t GetComponentSizeShift() {
+ return GetPackedField<ComponentSizeShiftField>();
+ }
+
DECLARE_INSTRUCTION(NewArray);
protected:
DEFAULT_COPY_CONSTRUCTOR(NewArray);
+
+ private:
+ static constexpr size_t kFieldComponentSizeShift = kNumberOfGenericPackedBits;
+ static constexpr size_t kFieldComponentSizeShiftSize = MinimumBitsToStore(3u);
+ static constexpr size_t kNumberOfNewArrayPackedBits =
+ kFieldComponentSizeShift + kFieldComponentSizeShiftSize;
+ static_assert(kNumberOfNewArrayPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+ using ComponentSizeShiftField =
+ BitField<size_t, kFieldComponentSizeShift, kFieldComponentSizeShift>;
};
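A minimal sketch of the packed-field technique behind ComponentSizeShiftField, assuming the usual BitField<T, kPosition, kSize> position/size layout (this is not ART's actual implementation):

    #include <cstdint>
    #include <cstddef>

    // Minimal packed-field sketch: store a small value in a few bits of a wider flags word.
    template <size_t kPosition, size_t kSize>
    struct TinyBitField {
      static constexpr uint32_t kMask = (1u << kSize) - 1u;
      static uint32_t Encode(uint32_t flags, uint32_t value) {
        return (flags & ~(kMask << kPosition)) | ((value & kMask) << kPosition);
      }
      static uint32_t Decode(uint32_t flags) {
        return (flags >> kPosition) & kMask;
      }
    };

With two bits (MinimumBitsToStore(3u)), shifts 0 through 3 cover component sizes of 1, 2, 4, and 8 bytes.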
class HAdd final : public HBinaryOperation {
@@ -5656,6 +5704,10 @@ class HTypeConversion final : public HExpression<1> {
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
return true;
}
+ // Return whether the conversion is implicit. This includes conversion to the same type.
+ bool IsImplicitConversion() const {
+ return DataType::IsTypeConversionImplicit(GetInputType(), GetResultType());
+ }
// Try to statically evaluate the conversion and return a HConstant
// containing the result. If the input cannot be converted, return nullptr.
@@ -5862,7 +5914,7 @@ class HArrayGet final : public HExpression<2> {
type,
SideEffects::ArrayReadOfType(type),
dex_pc,
- /* is_string_char_at */ false) {
+ /* is_string_char_at= */ false) {
}
HArrayGet(HInstruction* array,
@@ -6136,6 +6188,9 @@ class HBoundsCheck final : public HExpression<2> {
private:
static constexpr size_t kFlagIsStringCharAt = kNumberOfGenericPackedBits;
+ static constexpr size_t kNumberOfBoundsCheckPackedBits = kFlagIsStringCharAt + 1;
+ static_assert(kNumberOfBoundsCheckPackedBits <= HInstruction::kMaxNumberOfPackedBits,
+ "Too many packed fields.");
};
class HSuspendCheck final : public HExpression<0> {
@@ -6301,7 +6356,7 @@ class HLoadClass final : public HInstruction {
ReferenceTypeInfo GetLoadedClassRTI() {
if (GetPackedFlag<kFlagValidLoadedClassRTI>()) {
// Note: The is_exact flag from the return value should not be used.
- return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+ return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
} else {
return ReferenceTypeInfo::CreateInvalid();
}
@@ -7054,7 +7109,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction {
side_effects,
dex_pc,
allocator,
- /* number_of_inputs */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
+ /* number_of_inputs= */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u,
kArenaAllocTypeCheckInputs),
klass_(klass) {
SetPackedField<TypeCheckKindField>(check_kind);
@@ -7110,7 +7165,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction {
ReferenceTypeInfo GetTargetClassRTI() {
if (GetPackedFlag<kFlagValidTargetClassRTI>()) {
// Note: The is_exact flag from the return value should not be used.
- return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true);
+ return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true);
} else {
return ReferenceTypeInfo::CreateInvalid();
}
@@ -7377,7 +7432,7 @@ class HMemoryBarrier final : public HExpression<0> {
// }
//
// See also:
-// * CompilerDriver::RequiresConstructorBarrier
+// * DexCompilationUnit::RequiresConstructorBarrier
// * QuasiAtomic::ThreadFenceForConstructor
//
class HConstructorFence final : public HVariableInputSizeInstruction {
@@ -7423,7 +7478,7 @@ class HConstructorFence final : public HVariableInputSizeInstruction {
SideEffects::AllReads(),
dex_pc,
allocator,
- /* number_of_inputs */ 1,
+ /* number_of_inputs= */ 1,
kArenaAllocConstructorFenceInputs) {
DCHECK(fence_object != nullptr);
SetRawInputAt(0, fence_object);
@@ -7741,7 +7796,7 @@ class HIntermediateAddress final : public HExpression<2> {
#ifdef ART_ENABLE_CODEGEN_mips
#include "nodes_mips.h"
#endif
-#ifdef ART_ENABLE_CODEGEN_x86
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
#include "nodes_x86.h"
#endif
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index c7539f2846..efe4d6b000 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -207,7 +207,7 @@ class HVecUnaryOperation : public HVecOperation {
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 1,
+ /* number_of_inputs= */ 1,
vector_length,
dex_pc) {
SetRawInputAt(0, input);
@@ -235,7 +235,7 @@ class HVecBinaryOperation : public HVecOperation {
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 2,
+ /* number_of_inputs= */ 2,
vector_length,
dex_pc) {
SetRawInputAt(0, left);
@@ -384,21 +384,21 @@ class HVecReduce final : public HVecUnaryOperation {
HInstruction* input,
DataType::Type packed_type,
size_t vector_length,
- ReductionKind kind,
+ ReductionKind reduction_kind,
uint32_t dex_pc)
: HVecUnaryOperation(kVecReduce, allocator, input, packed_type, vector_length, dex_pc),
- kind_(kind) {
+ reduction_kind_(reduction_kind) {
DCHECK(HasConsistentPackedTypes(input, packed_type));
}
- ReductionKind GetKind() const { return kind_; }
+ ReductionKind GetReductionKind() const { return reduction_kind_; }
bool CanBeMoved() const override { return true; }
bool InstructionDataEquals(const HInstruction* other) const override {
DCHECK(other->IsVecReduce());
const HVecReduce* o = other->AsVecReduce();
- return HVecOperation::InstructionDataEquals(o) && GetKind() == o->GetKind();
+ return HVecOperation::InstructionDataEquals(o) && GetReductionKind() == o->GetReductionKind();
}
DECLARE_INSTRUCTION(VecReduce);
@@ -407,7 +407,7 @@ class HVecReduce final : public HVecUnaryOperation {
DEFAULT_COPY_CONSTRUCTOR(VecReduce);
private:
- const ReductionKind kind_;
+ const ReductionKind reduction_kind_;
};
// Converts every component in the vector,
@@ -948,7 +948,7 @@ class HVecMultiplyAccumulate final : public HVecOperation {
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc),
op_kind_(op) {
@@ -1002,7 +1002,7 @@ class HVecSADAccumulate final : public HVecOperation {
allocator,
packed_type,
SideEffects::None(),
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc) {
DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
@@ -1021,6 +1021,66 @@ class HVecSADAccumulate final : public HVecOperation {
DEFAULT_COPY_CONSTRUCTOR(VecSADAccumulate);
};
+// Performs the dot product of two vectors and adds the result to the wider-precision
+// components of the accumulator.
+//
+// viz. DOT_PRODUCT([ a1, .. , am], [ x1, .. , xn ], [ y1, .. , yn ]) =
+// [ a1 + sum(xi * yi), .. , am + sum(xj * yj) ],
+// for m <= n, non-overlapping sums,
+// for either both signed or both unsigned operands x, y.
+//
+// Notes:
+// - packed type reflects the type of sum reduction, not the type of the operands.
+// - IsZeroExtending() is used to determine the kind of signed/zero extension to be
+// performed for the operands.
+//
+// TODO: Support types other than kInt32 for packed type.
+class HVecDotProd final : public HVecOperation {
+ public:
+ HVecDotProd(ArenaAllocator* allocator,
+ HInstruction* accumulator,
+ HInstruction* left,
+ HInstruction* right,
+ DataType::Type packed_type,
+ bool is_zero_extending,
+ size_t vector_length,
+ uint32_t dex_pc)
+ : HVecOperation(kVecDotProd,
+ allocator,
+ packed_type,
+ SideEffects::None(),
+ /* number_of_inputs= */ 3,
+ vector_length,
+ dex_pc) {
+ DCHECK(HasConsistentPackedTypes(accumulator, packed_type));
+ DCHECK(DataType::IsIntegralType(packed_type));
+ DCHECK(left->IsVecOperation());
+ DCHECK(right->IsVecOperation());
+ DCHECK_EQ(ToSignedType(left->AsVecOperation()->GetPackedType()),
+ ToSignedType(right->AsVecOperation()->GetPackedType()));
+ SetRawInputAt(0, accumulator);
+ SetRawInputAt(1, left);
+ SetRawInputAt(2, right);
+ SetPackedFlag<kFieldHDotProdIsZeroExtending>(is_zero_extending);
+ }
+
+ bool IsZeroExtending() const { return GetPackedFlag<kFieldHDotProdIsZeroExtending>(); }
+
+ bool CanBeMoved() const override { return true; }
+
+ DECLARE_INSTRUCTION(VecDotProd);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecDotProd);
+
+ private:
+ // Additional packed bits.
+ static constexpr size_t kFieldHDotProdIsZeroExtending =
+ HVecOperation::kNumberOfVectorOpPackedBits;
+ static constexpr size_t kNumberOfHDotProdPackedBits = kFieldHDotProdIsZeroExtending + 1;
+ static_assert(kNumberOfHDotProdPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
+};
+
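A scalar model of the DOT_PRODUCT semantics documented above, assuming n is a multiple of m (the actual lane grouping is target-specific):

    #include <cstddef>
    #include <cstdint>

    // Each of the m accumulator lanes absorbs a non-overlapping group of n/m products.
    void DotProdModel(int32_t* a, size_t m, const int16_t* x, const int16_t* y, size_t n) {
      size_t group = n / m;
      for (size_t lane = 0; lane < m; ++lane) {
        for (size_t j = 0; j < group; ++j) {
          size_t i = lane * group + j;
          a[lane] += static_cast<int32_t>(x[i]) * static_cast<int32_t>(y[i]);
        }
      }
    }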
// Loads a vector from memory, viz. load(mem, 1)
// yield the vector [ mem(1), .. , mem(n) ].
class HVecLoad final : public HVecMemoryOperation {
@@ -1037,7 +1097,7 @@ class HVecLoad final : public HVecMemoryOperation {
allocator,
packed_type,
side_effects,
- /* number_of_inputs */ 2,
+ /* number_of_inputs= */ 2,
vector_length,
dex_pc) {
SetRawInputAt(0, base);
@@ -1083,7 +1143,7 @@ class HVecStore final : public HVecMemoryOperation {
allocator,
packed_type,
side_effects,
- /* number_of_inputs */ 3,
+ /* number_of_inputs= */ 3,
vector_length,
dex_pc) {
DCHECK(HasConsistentPackedTypes(value, packed_type));
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index af13449646..b0a665d704 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -401,9 +401,9 @@ TEST_F(NodesVectorTest, VectorKindMattersOnReduce) {
EXPECT_TRUE(v2->CanBeMoved());
EXPECT_TRUE(v3->CanBeMoved());
- EXPECT_EQ(HVecReduce::kSum, v1->GetKind());
- EXPECT_EQ(HVecReduce::kMin, v2->GetKind());
- EXPECT_EQ(HVecReduce::kMax, v3->GetKind());
+ EXPECT_EQ(HVecReduce::kSum, v1->GetReductionKind());
+ EXPECT_EQ(HVecReduce::kMin, v2->GetReductionKind());
+ EXPECT_EQ(HVecReduce::kMax, v3->GetReductionKind());
EXPECT_TRUE(v1->Equals(v1));
EXPECT_TRUE(v2->Equals(v2));
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index a55110426b..8e8fbc1581 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -128,6 +128,92 @@ class HX86PackedSwitch final : public HExpression<2> {
const int32_t num_entries_;
};
+class HX86AndNot final : public HBinaryOperation {
+ public:
+ HX86AndNot(DataType::Type result_type,
+ HInstruction* left,
+ HInstruction* right,
+ uint32_t dex_pc = kNoDexPc)
+ : HBinaryOperation(kX86AndNot, result_type, left, right, SideEffects::None(), dex_pc) {
+ }
+
+ bool IsCommutative() const override { return false; }
+
+ template <typename T> static T Compute(T x, T y) { return ~x & y; }
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const override {
+ return GetBlock()->GetGraph()->GetLongConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const override {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const override {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
+
+ DECLARE_INSTRUCTION(X86AndNot);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86AndNot);
+};
+
+class HX86MaskOrResetLeastSetBit final : public HUnaryOperation {
+ public:
+ HX86MaskOrResetLeastSetBit(DataType::Type result_type, InstructionKind op,
+ HInstruction* input, uint32_t dex_pc = kNoDexPc)
+ : HUnaryOperation(kX86MaskOrResetLeastSetBit, result_type, input, dex_pc),
+ op_kind_(op) {
+ DCHECK_EQ(result_type, DataType::Kind(input->GetType()));
+ DCHECK(op == HInstruction::kAnd || op == HInstruction::kXor) << op;
+ }
+ template <typename T>
+ auto Compute(T x) const -> decltype(x & (x-1)) {
+ static_assert(std::is_same<decltype(x & (x-1)), decltype(x ^ (x-1))>::value,
+ "Inconsistent bitwise types");
+ switch (op_kind_) {
+ case HInstruction::kAnd:
+ return x & (x-1);
+ case HInstruction::kXor:
+ return x ^ (x-1);
+ default:
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
+ }
+
+ HConstant* Evaluate(HIntConstant* x) const override {
+ return GetBlock()->GetGraph()->GetIntConstant(Compute(x->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x) const override {
+ return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const override {
+ LOG(FATAL) << DebugName() << "is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const override {
+ LOG(FATAL) << DebugName() << "is not defined for double values";
+ UNREACHABLE();
+ }
+ InstructionKind GetOpKind() const { return op_kind_; }
+
+ DECLARE_INSTRUCTION(X86MaskOrResetLeastSetBit);
+
+ protected:
+ const InstructionKind op_kind_;
+
+ DEFAULT_COPY_CONSTRUCTOR(X86MaskOrResetLeastSetBit);
+};
+
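A quick constexpr check of the two Compute() variants above; the values are illustrative and correspond to the x86 BLSR and BLSMSK behaviors respectively:

    // Illustrative values: the lowest set bit of 0b10110100 is bit 2.
    static_assert((0b10110100 & (0b10110100 - 1)) == 0b10110000, "kAnd resets the lowest set bit");
    static_assert((0b10110100 ^ (0b10110100 - 1)) == 0b00000111, "kXor masks up to and including it");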
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_X86_H_
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 142ddb5fbb..8864a12301 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -28,10 +28,14 @@
#endif
#ifdef ART_ENABLE_CODEGEN_x86
#include "pc_relative_fixups_x86.h"
+#include "instruction_simplifier_x86.h"
#endif
#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
#include "x86_memory_gen.h"
#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+#include "instruction_simplifier_x86_64.h"
+#endif
#include "bounds_check_elimination.h"
#include "cha_guard_optimization.h"
@@ -84,14 +88,10 @@ const char* OptimizationPassName(OptimizationPass pass) {
return HDeadCodeElimination::kDeadCodeEliminationPassName;
case OptimizationPass::kInliner:
return HInliner::kInlinerPassName;
- case OptimizationPass::kSharpening:
- return HSharpening::kSharpeningPassName;
case OptimizationPass::kSelectGenerator:
return HSelectGenerator::kSelectGeneratorPassName;
case OptimizationPass::kInstructionSimplifier:
return InstructionSimplifier::kInstructionSimplifierPassName;
- case OptimizationPass::kIntrinsicsRecognizer:
- return IntrinsicsRecognizer::kIntrinsicsRecognizerPassName;
case OptimizationPass::kCHAGuardOptimization:
return CHAGuardOptimization::kCHAGuardOptimizationPassName;
case OptimizationPass::kCodeSinking:
@@ -117,6 +117,12 @@ const char* OptimizationPassName(OptimizationPass pass) {
#ifdef ART_ENABLE_CODEGEN_x86
case OptimizationPass::kPcRelativeFixupsX86:
return x86::PcRelativeFixups::kPcRelativeFixupsX86PassName;
+ case OptimizationPass::kInstructionSimplifierX86:
+ return x86::InstructionSimplifierX86::kInstructionSimplifierX86PassName;
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+ case OptimizationPass::kInstructionSimplifierX86_64:
+ return x86_64::InstructionSimplifierX86_64::kInstructionSimplifierX86_64PassName;
#endif
#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
case OptimizationPass::kX86MemoryOperandGeneration:
@@ -141,14 +147,12 @@ OptimizationPass OptimizationPassByName(const std::string& pass_name) {
X(OptimizationPass::kInductionVarAnalysis);
X(OptimizationPass::kInliner);
X(OptimizationPass::kInstructionSimplifier);
- X(OptimizationPass::kIntrinsicsRecognizer);
X(OptimizationPass::kInvariantCodeMotion);
X(OptimizationPass::kLoadStoreAnalysis);
X(OptimizationPass::kLoadStoreElimination);
X(OptimizationPass::kLoopOptimization);
X(OptimizationPass::kScheduling);
X(OptimizationPass::kSelectGenerator);
- X(OptimizationPass::kSharpening);
X(OptimizationPass::kSideEffectsAnalysis);
#ifdef ART_ENABLE_CODEGEN_arm
X(OptimizationPass::kInstructionSimplifierArm);
@@ -177,7 +181,6 @@ ArenaVector<HOptimization*> ConstructOptimizations(
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
- CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles) {
ArenaVector<HOptimization*> optimizations(allocator->Adapter());
@@ -254,28 +257,21 @@ ArenaVector<HOptimization*> ConstructOptimizations(
codegen,
dex_compilation_unit, // outer_compilation_unit
dex_compilation_unit, // outermost_compilation_unit
- driver,
handles,
stats,
accessor.RegistersSize(),
- /* total_number_of_instructions */ 0,
- /* parent */ nullptr,
- /* depth */ 0,
+ /* total_number_of_instructions= */ 0,
+ /* parent= */ nullptr,
+ /* depth= */ 0,
pass_name);
break;
}
- case OptimizationPass::kSharpening:
- opt = new (allocator) HSharpening(graph, codegen, pass_name);
- break;
case OptimizationPass::kSelectGenerator:
opt = new (allocator) HSelectGenerator(graph, handles, stats, pass_name);
break;
case OptimizationPass::kInstructionSimplifier:
opt = new (allocator) InstructionSimplifier(graph, codegen, stats, pass_name);
break;
- case OptimizationPass::kIntrinsicsRecognizer:
- opt = new (allocator) IntrinsicsRecognizer(graph, stats, pass_name);
- break;
case OptimizationPass::kCHAGuardOptimization:
opt = new (allocator) CHAGuardOptimization(graph, pass_name);
break;
@@ -323,6 +319,14 @@ ArenaVector<HOptimization*> ConstructOptimizations(
DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
opt = new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
break;
+ case OptimizationPass::kInstructionSimplifierX86:
+ opt = new (allocator) x86::InstructionSimplifierX86(graph, codegen, stats);
+ break;
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+ case OptimizationPass::kInstructionSimplifierX86_64:
+ opt = new (allocator) x86_64::InstructionSimplifierX86_64(graph, codegen, stats);
+ break;
#endif
case OptimizationPass::kNone:
LOG(FATAL) << "kNone does not represent an actual pass";
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 88b283cebf..ce44b5f81a 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -77,14 +77,12 @@ enum class OptimizationPass {
kInductionVarAnalysis,
kInliner,
kInstructionSimplifier,
- kIntrinsicsRecognizer,
kInvariantCodeMotion,
kLoadStoreAnalysis,
kLoadStoreElimination,
kLoopOptimization,
kScheduling,
kSelectGenerator,
- kSharpening,
kSideEffectsAnalysis,
#ifdef ART_ENABLE_CODEGEN_arm
kInstructionSimplifierArm,
@@ -98,6 +96,10 @@ enum class OptimizationPass {
#endif
#ifdef ART_ENABLE_CODEGEN_x86
kPcRelativeFixupsX86,
+ kInstructionSimplifierX86,
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+ kInstructionSimplifierX86_64,
#endif
#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
kX86MemoryOperandGeneration,
@@ -145,7 +147,6 @@ ArenaVector<HOptimization*> ConstructOptimizations(
HGraph* graph,
OptimizingCompilerStats* stats,
CodeGenerator* codegen,
- CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles);
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index be1f7ea5b4..a52031cced 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -128,7 +128,7 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
public:
InternalCodeAllocator() {}
- virtual uint8_t* Allocate(size_t size) {
+ uint8_t* Allocate(size_t size) override {
memory_.resize(size);
return memory_.data();
}
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 0a747053cf..e8f8d32525 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -26,6 +26,7 @@
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/scoped_arena_allocator.h"
@@ -79,7 +80,7 @@ class CodeVectorAllocator final : public CodeAllocator {
explicit CodeVectorAllocator(ArenaAllocator* allocator)
: memory_(allocator->Adapter(kArenaAllocCodeBuffer)) {}
- virtual uint8_t* Allocate(size_t size) {
+ uint8_t* Allocate(size_t size) override {
memory_.resize(size);
return &memory_[0];
}
@@ -161,7 +162,7 @@ class PassObserver : public ValueObject {
VLOG(compiler) << "Starting pass: " << pass_name;
// Dump graph first, then start timer.
if (visualizer_enabled_) {
- visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_);
+ visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
FlushVisualizer();
}
if (timing_logger_enabled_) {
@@ -183,7 +184,7 @@ class PassObserver : public ValueObject {
timing_logger_.EndTiming();
}
if (visualizer_enabled_) {
- visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_);
+ visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
FlushVisualizer();
}
@@ -271,7 +272,7 @@ class OptimizingCompiler final : public Compiler {
bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file) const override;
- CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+ CompiledMethod* Compile(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -298,6 +299,7 @@ class OptimizingCompiler final : public Compiler {
bool JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger)
override
@@ -319,7 +321,6 @@ class OptimizingCompiler final : public Compiler {
graph,
compilation_stats_.get(),
codegen,
- GetCompilerDriver(),
dex_compilation_unit,
handles);
DCHECK_EQ(length, optimizations.size());
@@ -369,7 +370,7 @@ class OptimizingCompiler final : public Compiler {
CompiledMethod* Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
- const DexFile::CodeItem* item) const;
+ const dex::CodeItem* item) const;
// Try compiling a method and return the code generator used for
// compiling it.
@@ -383,6 +384,7 @@ class OptimizingCompiler final : public Compiler {
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const;
@@ -399,7 +401,14 @@ class OptimizingCompiler final : public Compiler {
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const;
- void GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo method_debug_info)
+ bool RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const;
+
+ void GenerateJitDebugInfo(ArtMethod* method,
+ const debug::MethodDebugInfo& method_debug_info)
REQUIRES_SHARED(Locks::mutator_lock_);
std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
@@ -456,6 +465,48 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
|| instruction_set == InstructionSet::kX86_64;
}
+bool OptimizingCompiler::RunBaselineOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const {
+ switch (codegen->GetCompilerOptions().GetInstructionSet()) {
+#ifdef ART_ENABLE_CODEGEN_mips
+ case InstructionSet::kMips: {
+ OptimizationDef mips_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsMips)
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ mips_optimizations);
+ }
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case InstructionSet::kX86: {
+ OptimizationDef x86_optimizations[] = {
+ OptDef(OptimizationPass::kPcRelativeFixupsX86),
+ };
+ return RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ x86_optimizations);
+ }
+#endif
+ default:
+ UNUSED(graph);
+ UNUSED(codegen);
+ UNUSED(dex_compilation_unit);
+ UNUSED(pass_observer);
+ UNUSED(handles);
+ return false;
+ }
+}
+
bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
CodeGenerator* codegen,
const DexCompilationUnit& dex_compilation_unit,
@@ -528,6 +579,7 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86: {
OptimizationDef x86_optimizations[] = {
+ OptDef(OptimizationPass::kInstructionSimplifierX86),
OptDef(OptimizationPass::kSideEffectsAnalysis),
OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
OptDef(OptimizationPass::kPcRelativeFixupsX86),
@@ -544,6 +596,7 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
#ifdef ART_ENABLE_CODEGEN_x86_64
case InstructionSet::kX86_64: {
OptimizationDef x86_64_optimizations[] = {
+ OptDef(OptimizationPass::kInstructionSimplifierX86_64),
OptDef(OptimizationPass::kSideEffectsAnalysis),
OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
OptDef(OptimizationPass::kX86MemoryOperandGeneration)
@@ -623,8 +676,6 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
OptimizationDef optimizations[] = {
// Initial optimizations.
- OptDef(OptimizationPass::kIntrinsicsRecognizer),
- OptDef(OptimizationPass::kSharpening),
OptDef(OptimizationPass::kConstantFolding),
OptDef(OptimizationPass::kInstructionSimplifier),
OptDef(OptimizationPass::kDeadCodeElimination,
@@ -709,12 +760,12 @@ static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator*
CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
- const DexFile::CodeItem* code_item_for_osr_check) const {
+ const dex::CodeItem* code_item_for_osr_check) const {
ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item_for_osr_check);
CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
- GetCompilerDriver(),
+ GetCompilerDriver()->GetCompiledMethodStorage(),
codegen->GetInstructionSet(),
code_allocator->GetMemory(),
ArrayRef<const uint8_t>(stack_map),
@@ -739,6 +790,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
+ bool baseline,
bool osr,
VariableSizedHandleScope* handles) const {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
@@ -747,7 +799,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
InstructionSet instruction_set = compiler_options.GetInstructionSet();
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
- const DexFile::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
+ const dex::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
// Always use the Thumb-2 assembler: some runtime functionality
// (like implicit stack overflow checks) assume Thumb-2.
@@ -776,6 +828,29 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);
+
+ bool dead_reference_safe;
+ ArrayRef<const uint8_t> interpreter_metadata;
+ // For AOT compilation, we may not get a method, for example if its class is erroneous,
+ // possibly due to an unavailable superclass. JIT should always have a method.
+ DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
+ if (method != nullptr) {
+ const dex::ClassDef* containing_class;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ containing_class = &method->GetClassDef();
+ interpreter_metadata = method->GetQuickenedInfo();
+ }
+ // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
+ // is currently rarely true.
+ dead_reference_safe =
+ annotations::HasDeadReferenceSafeAnnotation(dex_file, *containing_class)
+ && !annotations::MethodContainsRSensitiveAccess(dex_file, *containing_class, method_idx);
+ } else {
+ // If we could not resolve the class, conservatively assume it's dead-reference unsafe.
+ dead_reference_safe = false;
+ }
+
HGraph* graph = new (allocator) HGraph(
allocator,
arena_stack,
@@ -783,17 +858,12 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
method_idx,
compiler_options.GetInstructionSet(),
kInvalidInvokeType,
+ dead_reference_safe,
compiler_driver->GetCompilerOptions().GetDebuggable(),
- osr);
+ /* osr= */ osr);
- ArrayRef<const uint8_t> interpreter_metadata;
- // For AOT compilation, we may not get a method, for example if its class is erroneous.
- // JIT should always have a method.
- DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
if (method != nullptr) {
graph->SetArtMethod(method);
- ScopedObjectAccess soa(Thread::Current());
- interpreter_metadata = method->GetQuickenedInfo();
}
std::unique_ptr<CodeGenerator> codegen(
@@ -820,7 +890,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
code_item_accessor,
&dex_compilation_unit,
&dex_compilation_unit,
- compiler_driver,
codegen.get(),
compilation_stats_.get(),
interpreter_metadata,
@@ -848,6 +917,11 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
break;
}
+ case kAnalysisFailIrreducibleLoopAndStringInit: {
+ MaybeRecordStat(compilation_stats_.get(),
+ MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
+ break;
+ }
case kAnalysisSuccess:
UNREACHABLE();
}
@@ -856,11 +930,11 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
}
- RunOptimizations(graph,
- codegen.get(),
- dex_compilation_unit,
- &pass_observer,
- handles);
+ if (baseline) {
+ RunBaselineOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ } else {
+ RunOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
+ }
RegisterAllocator::Strategy regalloc_strategy =
compiler_options.GetRegisterAllocationStrategy();
@@ -905,10 +979,11 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
arena_stack,
dex_file,
method_idx,
- compiler_driver->GetCompilerOptions().GetInstructionSet(),
+ compiler_options.GetInstructionSet(),
kInvalidInvokeType,
- compiler_driver->GetCompilerOptions().GetDebuggable(),
- /* osr */ false);
+ /* dead_reference_safe= */ true, // Intrinsics don't affect dead reference safety.
+ compiler_options.GetDebuggable(),
+ /* osr= */ false);
DCHECK(Runtime::Current()->IsAotCompiler());
DCHECK(method != nullptr);
@@ -936,18 +1011,16 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
CodeItemDebugInfoAccessor(), // Null code item.
&dex_compilation_unit,
&dex_compilation_unit,
- compiler_driver,
codegen.get(),
compilation_stats_.get(),
- /* interpreter_metadata */ ArrayRef<const uint8_t>(),
+ /* interpreter_metadata= */ ArrayRef<const uint8_t>(),
handles);
builder.BuildIntrinsicGraph(method);
}
OptimizationDef optimizations[] = {
- OptDef(OptimizationPass::kIntrinsicsRecognizer),
- // Some intrinsics are converted to HIR by the simplifier and the codegen also
- // has a few assumptions that only the instruction simplifier can satisfy.
+ // The codegen has a few assumptions that only the instruction simplifier
+ // can satisfy.
OptDef(OptimizationPass::kInstructionSimplifier),
};
RunOptimizations(graph,
@@ -979,7 +1052,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
return codegen.release();
}
-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+CompiledMethod* OptimizingCompiler::Compile(const dex::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
uint16_t class_def_idx,
@@ -988,12 +1061,13 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
CompilerDriver* compiler_driver = GetCompilerDriver();
+ const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
CompiledMethod* compiled_method = nullptr;
Runtime* runtime = Runtime::Current();
DCHECK(runtime->IsAotCompiler());
- const VerifiedMethod* verified_method = compiler_driver->GetVerifiedMethod(&dex_file, method_idx);
+ const VerifiedMethod* verified_method = compiler_options.GetVerifiedMethod(&dex_file, method_idx);
DCHECK(!verified_method->HasRuntimeThrow());
- if (compiler_driver->IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
+ if (compiler_options.IsMethodVerifiedWithoutFailures(method_idx, class_def_idx, dex_file) ||
verifier::CanCompilerHandleVerificationFailure(
verified_method->GetEncounteredVerificationFailures())) {
ArenaAllocator allocator(runtime->GetArenaPool());
@@ -1002,6 +1076,15 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
std::unique_ptr<CodeGenerator> codegen;
bool compiled_intrinsic = false;
{
+ ScopedObjectAccess soa(Thread::Current());
+ ArtMethod* method =
+ runtime->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+ method_idx, dex_cache, jclass_loader, /*referrer=*/ nullptr, invoke_type);
+ DCHECK_EQ(method == nullptr, soa.Self()->IsExceptionPending());
+ soa.Self()->ClearException(); // Suppress exception if any.
+ VariableSizedHandleScope handles(soa.Self());
+ Handle<mirror::Class> compiling_class =
+ handles.NewHandle(method != nullptr ? method->GetDeclaringClass() : nullptr);
DexCompilationUnit dex_compilation_unit(
jclass_loader,
runtime->GetClassLinker(),
@@ -1010,16 +1093,13 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
class_def_idx,
method_idx,
access_flags,
- /* verified_method */ nullptr, // Not needed by the Optimizing compiler.
- dex_cache);
- ScopedObjectAccess soa(Thread::Current());
- ArtMethod* method = compiler_driver->ResolveMethod(
- soa, dex_cache, jclass_loader, &dex_compilation_unit, method_idx, invoke_type);
- VariableSizedHandleScope handles(soa.Self());
+ /*verified_method=*/ nullptr, // Not needed by the Optimizing compiler.
+ dex_cache,
+ compiling_class);
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
- DCHECK(compiler_driver->GetCompilerOptions().IsBootImage());
+ DCHECK(compiler_options.IsBootImage());
codegen.reset(
TryCompileIntrinsic(&allocator,
&arena_stack,
@@ -1038,7 +1118,8 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
&code_allocator,
dex_compilation_unit,
method,
- /* osr */ false,
+ compiler_options.IsBaseline(),
+ /* osr= */ false,
&handles));
}
}
@@ -1066,7 +1147,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
} else {
MethodCompilationStat method_stat;
- if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
+ if (compiler_options.VerifyAtRuntime()) {
method_stat = MethodCompilationStat::kNotCompiledVerifyAtRuntime;
} else {
method_stat = MethodCompilationStat::kNotCompiledVerificationError;
@@ -1075,8 +1156,8 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
if (kIsDebugBuild &&
- IsCompilingWithCoreImage() &&
- IsInstructionSetSupported(compiler_driver->GetCompilerOptions().GetInstructionSet())) {
+ compiler_options.CompilingWithCoreImage() &&
+ IsInstructionSetSupported(compiler_options.GetInstructionSet())) {
// For testing purposes, we put a special marker on method names
// that should be compiled with this compiler (when the
// instruction set is supported). This makes sure we're not
@@ -1099,7 +1180,7 @@ static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* alloca
jni_compiled_method.GetFrameSize(),
jni_compiled_method.GetCoreSpillMask(),
jni_compiled_method.GetFpSpillMask(),
- /* num_dex_registers */ 0);
+ /* num_dex_registers= */ 0);
stack_map_stream->EndMethod();
return stack_map_stream->Encode();
}
@@ -1116,21 +1197,23 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
if (compiler_options.IsBootImage()) {
ScopedObjectAccess soa(Thread::Current());
ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
- method_idx, dex_cache.Get(), /* class_loader */ nullptr);
+ method_idx, dex_cache.Get(), /*class_loader=*/ nullptr);
if (method != nullptr && UNLIKELY(method->IsIntrinsic())) {
+ VariableSizedHandleScope handles(soa.Self());
ScopedNullHandle<mirror::ClassLoader> class_loader; // null means boot class path loader.
+ Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
DexCompilationUnit dex_compilation_unit(
class_loader,
runtime->GetClassLinker(),
dex_file,
- /* code_item */ nullptr,
- /* class_def_idx */ DexFile::kDexNoIndex16,
+ /*code_item=*/ nullptr,
+ /*class_def_idx=*/ DexFile::kDexNoIndex16,
method_idx,
access_flags,
- /* verified_method */ nullptr,
- dex_cache);
+ /*verified_method=*/ nullptr,
+ dex_cache,
+ compiling_class);
CodeVectorAllocator code_allocator(&allocator);
- VariableSizedHandleScope handles(soa.Self());
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
std::unique_ptr<CodeGenerator> codegen(
@@ -1144,7 +1227,7 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
CompiledMethod* compiled_method = Emit(&allocator,
&code_allocator,
codegen.get(),
- /* code_item_for_osr_check */ nullptr);
+ /* item= */ nullptr);
compiled_method->MarkAsIntrinsic();
return compiled_method;
}
@@ -1159,45 +1242,27 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
jni_compiled_method);
return CompiledMethod::SwapAllocCompiledMethod(
- GetCompilerDriver(),
+ GetCompilerDriver()->GetCompiledMethodStorage(),
jni_compiled_method.GetInstructionSet(),
jni_compiled_method.GetCode(),
ArrayRef<const uint8_t>(stack_map),
jni_compiled_method.GetCfi(),
- /* patches */ ArrayRef<const linker::LinkerPatch>());
+ /* patches= */ ArrayRef<const linker::LinkerPatch>());
}
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
return new OptimizingCompiler(driver);
}
-bool IsCompilingWithCoreImage() {
- const std::string& image = Runtime::Current()->GetImageLocation();
- return CompilerDriver::IsCoreImageFilename(image);
-}
-
bool EncodeArtMethodInInlineInfo(ArtMethod* method ATTRIBUTE_UNUSED) {
// Note: the runtime is null only for unit testing.
return Runtime::Current() == nullptr || !Runtime::Current()->IsAotCompiler();
}
-bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee) {
- if (!Runtime::Current()->IsAotCompiler()) {
- // JIT can always encode methods in stack maps.
- return true;
- }
- if (IsSameDexFile(caller_dex_file, *callee->GetDexFile())) {
- return true;
- }
- // TODO(ngeoffray): Support more AOT cases for inlining:
- // - methods in multidex
- // - methods in boot image for on-device non-PIC compilation.
- return false;
-}
-
bool OptimizingCompiler::JitCompile(Thread* self,
jit::JitCodeCache* code_cache,
ArtMethod* method,
+ bool baseline,
bool osr,
jit::JitLogger* jit_logger) {
StackHandleScope<3> hs(self);
@@ -1208,7 +1273,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
const DexFile* dex_file = method->GetDexFile();
const uint16_t class_def_idx = method->GetClassDefIndex();
- const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+ const dex::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
const uint32_t method_idx = method->GetDexMethodIndex();
const uint32_t access_flags = method->GetAccessFlags();
@@ -1219,7 +1284,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
compiler_options, access_flags, method_idx, *dex_file);
- ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
+ std::vector<Handle<mirror::Object>> roots;
ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
allocator.Adapter(kArenaAllocCHA));
ArenaStack arena_stack(runtime->GetJitArenaPool());
@@ -1231,7 +1296,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
stack_map.size(),
- /* number_of_roots */ 0,
+ /* number_of_roots= */ 0,
method,
&stack_map_data,
&roots_data);
@@ -1251,7 +1316,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
data_size,
osr,
roots,
- /* has_should_deoptimize_flag */ false,
+ /* has_should_deoptimize_flag= */ false,
cha_single_implementation_list);
if (code == nullptr) {
return false;
@@ -1293,6 +1358,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
std::unique_ptr<CodeGenerator> codegen;
{
+ Handle<mirror::Class> compiling_class = handles.NewHandle(method->GetDeclaringClass());
DexCompilationUnit dex_compilation_unit(
class_loader,
runtime->GetClassLinker(),
@@ -1301,8 +1367,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
class_def_idx,
method_idx,
access_flags,
- /* verified_method */ nullptr,
- dex_cache);
+ /*verified_method=*/ nullptr,
+ dex_cache,
+ compiling_class);
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(self, kNative);
@@ -1312,6 +1379,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
&code_allocator,
dex_compilation_unit,
method,
+ baseline,
osr,
&handles));
if (codegen.get() == nullptr) {
@@ -1321,19 +1389,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
size_t number_of_roots = codegen->GetNumberOfJitRoots();
- // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots
- // will be visible by the GC between EmitLiterals and CommitCode. Once CommitCode is
- // executed, this array is not needed.
- Handle<mirror::ObjectArray<mirror::Object>> roots(
- hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(
- self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(), number_of_roots)));
- if (roots == nullptr) {
- // Out of memory, just clear the exception to avoid any Java exception uncaught problems.
- MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
- DCHECK(self->IsExceptionPending());
- self->ClearException();
- return false;
- }
uint8_t* stack_map_data = nullptr;
uint8_t* roots_data = nullptr;
uint32_t data_size = code_cache->ReserveData(self,
@@ -1347,7 +1402,14 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return false;
}
memcpy(stack_map_data, stack_map.data(), stack_map.size());
- codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data);
+ std::vector<Handle<mirror::Object>> roots;
+ codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
+ // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
+ DCHECK(std::all_of(roots.begin(),
+ roots.end(),
+ [&handles](Handle<mirror::Object> root){
+ return handles.Contains(root.GetReference());
+ }));
const void* code = code_cache->CommitCode(
self,
@@ -1413,26 +1475,31 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return true;
}
-void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo info) {
+void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method ATTRIBUTE_UNUSED,
+ const debug::MethodDebugInfo& info) {
const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
DCHECK(compiler_options.GenerateAnyDebugInfo());
-
- // If both flags are passed, generate full debug info.
- const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
-
- // Create entry for the single method that we just compiled.
- std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
- compiler_options.GetInstructionSet(),
- compiler_options.GetInstructionSetFeatures(),
- mini_debug_info,
- ArrayRef<const debug::MethodDebugInfo>(&info, 1));
- MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
- AddNativeDebugInfoForJit(reinterpret_cast<const void*>(info.code_address), elf_file);
-
- VLOG(jit)
- << "JIT mini-debug-info added for " << ArtMethod::PrettyMethod(method)
- << " size=" << PrettySize(elf_file.size())
- << " total_size=" << PrettySize(GetJitNativeDebugInfoMemUsage());
+ TimingLogger logger("Generate JIT debug info logger", true, VLOG_IS_ON(jit));
+ {
+ TimingLogger::ScopedTiming st("Generate JIT debug info", &logger);
+
+ // If both flags are passed, generate full debug info.
+ const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
+
+ // Create entry for the single method that we just compiled.
+ std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
+ compiler_options.GetInstructionSet(),
+ compiler_options.GetInstructionSetFeatures(),
+ mini_debug_info,
+ info);
+ AddNativeDebugInfoForJit(Thread::Current(),
+ reinterpret_cast<const void*>(info.code_address),
+ elf_file,
+ debug::PackElfFileForJIT,
+ compiler_options.GetInstructionSet(),
+ compiler_options.GetInstructionSetFeatures());
+ }
+ Runtime::Current()->GetJit()->AddTimingLogger(logger);
}
} // namespace art
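A minimal sketch of the timing pattern introduced in GenerateJitDebugInfo above, using only the calls visible in the hunk; DoTimedWork() is a hypothetical placeholder for the measured body, not an ART function:

    TimingLogger logger("Example timings", /* precise= */ true, /* verbose= */ false);
    {
      TimingLogger::ScopedTiming st("Timed step", &logger);
      DoTimedWork();  // hypothetical placeholder for the work being measured
    }
    // Hand the logger to the JIT so the timings are aggregated with other JIT work.
    Runtime::Current()->GetJit()->AddTimingLogger(logger);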
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
index 6ee9c70fdb..f5279e83eb 100644
--- a/compiler/optimizing/optimizing_compiler.h
+++ b/compiler/optimizing/optimizing_compiler.h
@@ -29,14 +29,7 @@ class DexFile;
Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
-// Returns whether we are compiling against a "core" image, which
-// is an indicative we are running tests. The compiler will use that
-// information for checking invariants.
-bool IsCompilingWithCoreImage();
-
bool EncodeArtMethodInInlineInfo(ArtMethod* method);
-bool CanEncodeInlinedMethodInStackMap(const DexFile& caller_dex_file, ArtMethod* callee)
- REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 9a26f2f6c4..ddd57f5f1a 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -22,9 +22,10 @@
#include <string>
#include <type_traits>
+#include <android-base/logging.h>
+
#include "base/atomic.h"
#include "base/globals.h"
-#include "base/logging.h" // For VLOG_IS_ON.
namespace art {
@@ -59,6 +60,7 @@ enum class MethodCompilationStat {
kNotCompiledUnsupportedIsa,
kNotCompiledVerificationError,
kNotCompiledVerifyAtRuntime,
+ kNotCompiledIrreducibleLoopAndStringInit,
kInlinedMonomorphicCall,
kInlinedPolymorphicCall,
kMonomorphicCall,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index f903f82d50..e5f694109a 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -155,7 +155,7 @@ class OptimizingUnitTestHelper {
void* aligned_data = GetAllocator()->Alloc(code_item_size);
memcpy(aligned_data, &data[0], code_item_size);
CHECK_ALIGNED(aligned_data, StandardDexFile::CodeItem::kAlignment);
- const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(aligned_data);
+ const dex::CodeItem* code_item = reinterpret_cast<const dex::CodeItem*>(aligned_data);
{
ScopedObjectAccess soa(Thread::Current());
@@ -165,13 +165,13 @@ class OptimizingUnitTestHelper {
const DexCompilationUnit* dex_compilation_unit =
new (graph->GetAllocator()) DexCompilationUnit(
handles_->NewHandle<mirror::ClassLoader>(nullptr),
- /* class_linker */ nullptr,
+ /* class_linker= */ nullptr,
graph->GetDexFile(),
code_item,
- /* class_def_index */ DexFile::kDexNoIndex16,
- /* method_idx */ dex::kDexNoIndex,
- /* access_flags */ 0u,
- /* verified_method */ nullptr,
+ /* class_def_index= */ DexFile::kDexNoIndex16,
+ /* method_idx= */ dex::kDexNoIndex,
+ /* access_flags= */ 0u,
+ /* verified_method= */ nullptr,
handles_->NewHandle<mirror::DexCache>(nullptr));
CodeItemDebugInfoAccessor accessor(graph->GetDexFile(), code_item, /*dex_method_idx*/ 0u);
HGraphBuilder builder(graph, dex_compilation_unit, accessor, handles_.get(), return_type);
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 399a6d8cbd..a8ab6cdd0c 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -174,8 +174,8 @@ class ParallelMoveTest : public ::testing::Test {
template<> const bool ParallelMoveTest<TestParallelMoveResolverWithSwap>::has_swap = true;
template<> const bool ParallelMoveTest<TestParallelMoveResolverNoSwap>::has_swap = false;
-typedef ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>
- ParallelMoveResolverTestTypes;
+using ParallelMoveResolverTestTypes =
+ ::testing::Types<TestParallelMoveResolverWithSwap, TestParallelMoveResolverNoSwap>;
TYPED_TEST_CASE(ParallelMoveTest, ParallelMoveResolverTestTypes);
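The parallel_move_test change above only swaps a typedef for a using alias; the two spellings declare the same type, but the alias form reads left to right and, unlike typedef, also has a templated variant. A small illustrative sketch (the alias name here is made up):

    // Alias templates have no typedef equivalent; this is the main practical gain.
    template <typename Resolver>
    using SingleResolverTypes = ::testing::Types<Resolver>;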
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 4b07d5b621..4ff293c46c 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -17,7 +17,6 @@
#include "pc_relative_fixups_x86.h"
#include "code_generator_x86.h"
#include "intrinsics_x86.h"
-#include "runtime.h"
namespace art {
namespace x86 {
@@ -239,7 +238,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
case Intrinsics::kIntegerValueOf:
// This intrinsic can be call free if it loads the address of the boot image object.
// If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
- if (Runtime::Current()->UseJitCompilation()) {
+ if (!codegen_->GetCompilerOptions().GetCompilePic()) {
break;
}
FALLTHROUGH_INTENDED;
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index fc81740013..fbdbf9d086 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -87,9 +87,9 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
if (GetGraph()->GetArtMethod() != char_at_method) {
ArenaAllocator* allocator = GetGraph()->GetAllocator();
HEnvironment* environment = new (allocator) HEnvironment(allocator,
- /* number_of_vregs */ 0u,
+ /* number_of_vregs= */ 0u,
char_at_method,
- /* dex_pc */ dex::kDexNoIndex,
+ /* dex_pc= */ dex::kDexNoIndex,
check);
check->InsertRawEnvironment(environment);
}
@@ -304,4 +304,13 @@ bool PrepareForRegisterAllocation::CanMoveClinitCheck(HInstruction* input,
return true;
}
+void PrepareForRegisterAllocation::VisitTypeConversion(HTypeConversion* instruction) {
+ // For simplicity, our code generators don't handle implicit type conversion, so ensure
+ // there are none before hitting codegen.
+ if (instruction->IsImplicitConversion()) {
+ instruction->ReplaceWith(instruction->GetInput());
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a8ab256e27..e0bb76eb22 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -55,6 +55,7 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
void VisitConstructorFence(HConstructorFence* constructor_fence) override;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override;
void VisitDeoptimize(HDeoptimize* deoptimize) override;
+ void VisitTypeConversion(HTypeConversion* instruction) override;
bool CanMoveClinitCheck(HInstruction* input, HInstruction* user) const;
bool CanEmitConditionAt(HCondition* condition, HInstruction* user) const;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index a9d590232c..4929e0a3a1 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -114,9 +114,9 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
void VisitCheckCast(HCheckCast* instr) override;
void VisitBoundType(HBoundType* instr) override;
void VisitNullCheck(HNullCheck* instr) override;
- void VisitPhi(HPhi* phi);
+ void VisitPhi(HPhi* phi) override;
- void VisitBasicBlock(HBasicBlock* block);
+ void VisitBasicBlock(HBasicBlock* block) override;
void ProcessWorklist();
private:
@@ -278,7 +278,7 @@ static void BoundTypeIn(HInstruction* receiver,
if (ShouldCreateBoundType(
insert_point, receiver, class_rti, start_instruction, start_block)) {
bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver);
- bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false);
+ bound_type->SetUpperBound(class_rti, /* can_be_null= */ false);
start_block->InsertInstructionBefore(bound_type, insert_point);
// To comply with the RTP algorithm, don't type the bound type just yet, it will
// be handled in RTPVisitor::VisitBoundType.
@@ -350,7 +350,7 @@ static void BoundTypeForClassCheck(HInstruction* check) {
HBasicBlock* trueBlock = compare->IsEqual()
? check->AsIf()->IfTrueSuccessor()
: check->AsIf()->IfFalseSuccessor();
- BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti);
+ BoundTypeIn(receiver, trueBlock, /* start_instruction= */ nullptr, class_rti);
} else {
DCHECK(check->IsDeoptimize());
if (compare->IsEqual() && check->AsDeoptimize()->GuardsAnInput()) {
@@ -427,9 +427,9 @@ void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfNotNull(HBasicBlock* bl
: ifInstruction->IfFalseSuccessor();
ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create(
- handle_cache_->GetObjectClassHandle(), /* is_exact */ false);
+ handle_cache_->GetObjectClassHandle(), /* is_exact= */ false);
- BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti);
+ BoundTypeIn(obj, notNullBlock, /* start_instruction= */ nullptr, object_rti);
}
// Returns true if one of the patterns below has been recognized. If so, the
@@ -538,10 +538,10 @@ void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfInstanceOf(HBasicBlock*
{
ScopedObjectAccess soa(Thread::Current());
if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
- class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false);
+ class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false);
}
}
- BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti);
+ BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction= */ nullptr, class_rti);
}
void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr,
@@ -561,7 +561,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
// Use a null loader, the target method is in a boot classpath dex file.
Handle<mirror::ClassLoader> loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
- dex_method_index, dex_cache, loader, /* referrer */ nullptr, kDirect);
+ dex_method_index, dex_cache, loader, /* referrer= */ nullptr, kDirect);
DCHECK(method != nullptr);
ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr);
@@ -571,7 +571,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
<< "Expected String.<init>: " << method->PrettyMethod();
}
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
} else if (IsAdmissible(klass)) {
ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass);
is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes();
@@ -600,12 +600,12 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction*
void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) {
ScopedObjectAccess soa(Thread::Current());
- SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+ SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
}
void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) {
ScopedObjectAccess soa(Thread::Current());
- SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true);
+ SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true);
}
void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) {
@@ -614,7 +614,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue*
UpdateReferenceTypeInfo(instr,
instr->GetTypeIndex(),
instr->GetDexFile(),
- /* is_exact */ false);
+ /* is_exact= */ false);
}
}
@@ -632,7 +632,7 @@ void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstructio
klass = info.GetField()->LookupResolvedType();
}
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
}
void ReferenceTypePropagation::RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) {
@@ -665,7 +665,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) {
instr->SetValidLoadedClassRTI();
}
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitInstanceOf(HInstanceOf* instr) {
@@ -682,31 +682,31 @@ void ReferenceTypePropagation::RTPVisitor::VisitClinitCheck(HClinitCheck* instr)
void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodHandle(HLoadMethodHandle* instr) {
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(
handle_cache_->GetMethodHandleClassHandle(),
- /* is_exact */ true));
+ /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodType(HLoadMethodType* instr) {
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitLoadString(HLoadString* instr) {
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
+ ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true));
}
void ReferenceTypePropagation::RTPVisitor::VisitLoadException(HLoadException* instr) {
DCHECK(instr->GetBlock()->IsCatchBlock());
TryCatchInformation* catch_info = instr->GetBlock()->GetTryCatchInformation();
- if (catch_info->IsCatchAllTypeIndex()) {
- instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact */ false));
- } else {
+ if (catch_info->IsValidTypeIndex()) {
UpdateReferenceTypeInfo(instr,
catch_info->GetCatchTypeIndex(),
catch_info->GetCatchDexFile(),
- /* is_exact */ false);
+ /* is_exact= */ false);
+ } else {
+ instr->SetReferenceTypeInfo(
+ ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact= */ false));
}
}
@@ -736,7 +736,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitBoundType(HBoundType* instr) {
// bound type is dead. To not confuse potential other optimizations, we mark
// the bound as non-exact.
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false));
+ ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false));
}
} else {
// Object not typed yet. Leave BoundType untyped for now rather than
@@ -914,7 +914,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitInvoke(HInvoke* instr) {
ScopedObjectAccess soa(Thread::Current());
ArtMethod* method = instr->GetResolvedMethod();
ObjPtr<mirror::Class> klass = (method == nullptr) ? nullptr : method->LookupResolvedReturnType();
- SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+ SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
}
void ReferenceTypePropagation::RTPVisitor::VisitArrayGet(HArrayGet* instr) {
@@ -947,7 +947,7 @@ void ReferenceTypePropagation::RTPVisitor::UpdateBoundType(HBoundType* instr) {
// bound type is dead. To not confuse potential other optimizations, we mark
// the bound as non-exact.
instr->SetReferenceTypeInfo(
- ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact */ false));
+ ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact= */ false));
}
}
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 27f9ac3990..b1f0a1add9 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -280,16 +280,16 @@ size_t RegisterAllocationResolver::CalculateMaximumSafepointSpillSize(
LocationSummary* locations = instruction->GetLocations();
if (locations->OnlyCallsOnSlowPath()) {
size_t core_spills =
- codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
+ codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true);
size_t fp_spills =
- codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
+ codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false);
size_t spill_size =
core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
} else if (locations->CallsOnMainAndSlowPath()) {
// Nothing to spill on the slow path if the main path already clobbers caller-saves.
- DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
- DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
+ DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true));
+ DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false));
}
}
return maximum_safepoint_spill_size;
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 1e00003701..0d6c5a3eff 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -252,7 +252,7 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction)
temp_intervals_.push_back(interval);
interval->AddTempUse(instruction, i);
if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) {
- interval->AddHighInterval(/* is_temp */ true);
+ interval->AddHighInterval(/* is_temp= */ true);
LiveInterval* high = interval->GetHighInterval();
temp_intervals_.push_back(high);
unhandled_fp_intervals_.push_back(high);
@@ -284,7 +284,7 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction)
}
if (locations->WillCall()) {
- BlockRegisters(position, position + 1, /* caller_save_only */ true);
+ BlockRegisters(position, position + 1, /* caller_save_only= */ true);
}
for (size_t i = 0; i < locations->GetInputCount(); ++i) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index db6a760007..79eb082cd7 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -68,11 +68,11 @@ class RegisterAllocatorTest : public OptimizingUnitTest {
bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals,
const CodeGenerator& codegen) {
return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
- /* number_of_spill_slots */ 0u,
- /* number_of_out_slots */ 0u,
+ /* number_of_spill_slots= */ 0u,
+ /* number_of_out_slots= */ 0u,
codegen,
- /* processing_core_registers */ true,
- /* log_fatal_on_failure */ false);
+ /* processing_core_registers= */ true,
+ /* log_fatal_on_failure= */ false);
}
};
@@ -872,9 +872,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
// Create an interval with lifetime holes.
static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}};
LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), GetScopedAllocator(), -1, one);
- first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
- first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 7));
- first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 6));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 7));
+ first->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 6));
locations = new (GetAllocator()) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
@@ -895,9 +895,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
// before lifetime position 6 yet.
static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}};
LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), GetScopedAllocator(), -1, three);
- third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 8));
- third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 4));
- third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, false, 3));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 8));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 4));
+ third->uses_.push_front(*new (GetScopedAllocator()) UsePosition(user, 0u, 3));
locations = new (GetAllocator()) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
third = third->SplitAt(3);
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index df897a4904..fdef45ec8b 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -680,7 +680,7 @@ static void MoveAfterInBlock(HInstruction* instruction, HInstruction* cursor) {
DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
DCHECK(!instruction->IsControlFlow());
DCHECK(!cursor->IsControlFlow());
- instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
+ instruction->MoveBefore(cursor->GetNext(), /* do_checks= */ false);
}
void HScheduler::Schedule(HInstruction* instruction) {
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index d89d1171a1..858a555e97 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -563,7 +563,7 @@ void SchedulingLatencyVisitorARM::HandleGenerateDataProc(HDataProcWithShifterOp*
last_visited_internal_latency_ = kArmIntegerOpLatency;
last_visited_latency_ = kArmIntegerOpLatency;
} else {
- HandleGenerateDataProcInstruction(/* internal_latency */ true);
+ HandleGenerateDataProcInstruction(/* internal_latency= */ true);
HandleGenerateDataProcInstruction();
}
}
@@ -585,8 +585,8 @@ void SchedulingLatencyVisitorARM::HandleGenerateLongDataProc(HDataProcWithShifte
DCHECK_LT(shift_value, 32U);
if (kind == HInstruction::kOr || kind == HInstruction::kXor) {
- HandleGenerateDataProcInstruction(/* internal_latency */ true);
- HandleGenerateDataProcInstruction(/* internal_latency */ true);
+ HandleGenerateDataProcInstruction(/* internal_latency= */ true);
+ HandleGenerateDataProcInstruction(/* internal_latency= */ true);
HandleGenerateDataProcInstruction();
} else {
last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 981fcc42a7..e0e265a04c 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -148,7 +148,7 @@ class SchedulerTest : public OptimizingUnitTest {
SchedulingGraph scheduling_graph(scheduler,
GetScopedAllocator(),
- /* heap_location_collector */ nullptr);
+ /* heap_location_collector= */ nullptr);
// Instructions must be inserted in reverse order into the scheduling graph.
for (HInstruction* instr : ReverseRange(block_instructions)) {
scheduling_graph.AddNode(instr);
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 5c2f57e314..885a08d459 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -35,22 +35,6 @@
namespace art {
-bool HSharpening::Run() {
- // We don't care about the order of the blocks here.
- for (HBasicBlock* block : graph_->GetReversePostOrder()) {
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- if (instruction->IsInvokeStaticOrDirect()) {
- SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(), codegen_);
- }
- // TODO: Move the sharpening of invoke-virtual/-interface/-super from HGraphBuilder
- // here. Rewrite it to avoid the CompilerDriver's reliance on verifier data
- // because we know the type better when inlining.
- }
- }
- return true;
-}
-
static bool IsInBootImage(ArtMethod* method) {
const std::vector<gc::space::ImageSpace*>& image_spaces =
Runtime::Current()->GetHeap()->GetBootImageSpaces();
@@ -72,17 +56,14 @@ static bool BootImageAOTCanEmbedMethod(ArtMethod* method, const CompilerOptions&
return compiler_options.IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
}
-void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
- CodeGenerator* codegen) {
- if (invoke->IsStringInit()) {
- // Not using the dex cache arrays. But we could still try to use a better dispatch...
- // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
- return;
+HInvokeStaticOrDirect::DispatchInfo HSharpening::SharpenInvokeStaticOrDirect(
+ ArtMethod* callee, CodeGenerator* codegen) {
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(Thread::Current()); // Required for GetDeclaringClass below.
+ DCHECK(callee != nullptr);
+ DCHECK(!(callee->IsConstructor() && callee->GetDeclaringClass()->IsStringClass()));
}
- ArtMethod* callee = invoke->GetResolvedMethod();
- DCHECK(callee != nullptr);
-
HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
uint64_t method_load_data = 0u;
@@ -141,9 +122,7 @@ void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
HInvokeStaticOrDirect::DispatchInfo desired_dispatch_info = {
method_load_kind, code_ptr_location, method_load_data
};
- HInvokeStaticOrDirect::DispatchInfo dispatch_info =
- codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, invoke);
- invoke->SetDispatchInfo(dispatch_info);
+ return codegen->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info, callee);
}
HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
@@ -254,7 +233,7 @@ static inline bool CanUseTypeCheckBitstring(ObjPtr<mirror::Class> klass, CodeGen
// Try to assign a type check bitstring.
MutexLock subtype_check_lock(Thread::Current(), *Locks::subtype_check_lock_);
- if ((false) && // FIXME: Inliner does not respect CompilerDriver::IsClassToCompile()
+ if ((false) && // FIXME: Inliner does not respect CompilerDriver::ShouldCompileMethod()
// and we're hitting an unassigned bitstring in dex2oat_image_test. b/26687569
kIsDebugBuild &&
codegen->GetCompilerOptions().IsBootImage() &&
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index dc55eea683..b81867201f 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -25,24 +25,13 @@ namespace art {
class CodeGenerator;
class DexCompilationUnit;
-// Optimization that tries to improve the way we dispatch methods and access types,
-// fields, etc. Besides actual method sharpening based on receiver type (for example
-// virtual->direct), this includes selecting the best available dispatch for
-// invoke-static/-direct based on code generator support.
-class HSharpening : public HOptimization {
+// Utility methods that try to improve the way we dispatch methods, and access
+// types and strings.
+class HSharpening {
public:
- HSharpening(HGraph* graph,
- CodeGenerator* codegen,
- const char* name = kSharpeningPassName)
- : HOptimization(graph, name),
- codegen_(codegen) { }
-
- bool Run() override;
-
- static constexpr const char* kSharpeningPassName = "sharpening";
-
- // Used by Sharpening and InstructionSimplifier.
- static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, CodeGenerator* codegen);
+ // Used by the builder and InstructionSimplifier.
+ static HInvokeStaticOrDirect::DispatchInfo SharpenInvokeStaticOrDirect(
+ ArtMethod* callee, CodeGenerator* codegen);
// Used by the builder and the inliner.
static HLoadClass::LoadKind ComputeLoadClassKind(HLoadClass* load_class,
@@ -61,9 +50,6 @@ class HSharpening : public HOptimization {
CodeGenerator* codegen,
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles);
-
- private:
- CodeGenerator* codegen_;
};
} // namespace art
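A hedged usage sketch of the reworked static API, mirroring the removed pass body above; `invoke` and `codegen` stand for whatever HInvoke* and CodeGenerator* the builder or instruction simplifier has in scope:

    if (invoke->IsInvokeStaticOrDirect() && !invoke->AsInvokeStaticOrDirect()->IsStringInit()) {
      // The caller now applies the dispatch info itself instead of a separate pass doing it.
      HInvokeStaticOrDirect::DispatchInfo dispatch_info =
          HSharpening::SharpenInvokeStaticOrDirect(invoke->GetResolvedMethod(), codegen);
      invoke->AsInvokeStaticOrDirect()->SetDispatchInfo(dispatch_info);
    }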
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index 4b0be07f3b..cf26e79c69 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -141,13 +141,13 @@ TEST(SideEffectsTest, NoDependences) {
TEST(SideEffectsTest, VolatileDependences) {
SideEffects volatile_write =
- SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ true);
+ SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ true);
SideEffects any_write =
- SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false);
+ SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false);
SideEffects volatile_read =
- SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ true);
+ SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ true);
SideEffects any_read =
- SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ false);
+ SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ false);
EXPECT_FALSE(volatile_write.MayDependOn(any_read));
EXPECT_TRUE(any_read.MayDependOn(volatile_write));
@@ -163,15 +163,15 @@ TEST(SideEffectsTest, VolatileDependences) {
TEST(SideEffectsTest, SameWidthTypesNoAlias) {
// Type I/F.
testNoWriteAndReadDependence(
- SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false),
- SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile */ false));
+ SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false),
+ SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile= */ false));
testNoWriteAndReadDependence(
SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
// Type L/D.
testNoWriteAndReadDependence(
- SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false),
- SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile */ false));
+ SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false),
+ SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile= */ false));
testNoWriteAndReadDependence(
SideEffects::ArrayWriteOfType(DataType::Type::kInt64),
SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
@@ -181,9 +181,9 @@ TEST(SideEffectsTest, AllWritesAndReads) {
SideEffects s = SideEffects::None();
// Keep taking the union of different writes and reads.
for (DataType::Type type : kTestTypes) {
- s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayWriteOfType(type));
- s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayReadOfType(type));
}
EXPECT_TRUE(s.DoesAllReadWrite());
@@ -254,10 +254,10 @@ TEST(SideEffectsTest, BitStrings) {
"||I|||||",
SideEffects::ArrayReadOfType(DataType::Type::kInt32).ToString().c_str());
SideEffects s = SideEffects::None();
- s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile */ false));
- s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile= */ false));
+ s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayWriteOfType(DataType::Type::kInt16));
- s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile */ false));
+ s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile= */ false));
s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
EXPECT_STREQ("||DF|I||S|JC|", s.ToString().c_str());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index dda29a1b4b..0d0e1ecf1f 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -16,6 +16,9 @@
#include "ssa_builder.h"
+#include "base/arena_bit_vector.h"
+#include "base/bit_vector-inl.h"
+#include "base/logging.h"
#include "data_type-inl.h"
#include "dex/bytecode_utils.h"
#include "mirror/class-inl.h"
@@ -388,7 +391,7 @@ bool SsaBuilder::FixAmbiguousArrayOps() {
// succeed in code validated by the verifier.
HInstruction* equivalent = GetFloatOrDoubleEquivalent(value, array_type);
DCHECK(equivalent != nullptr);
- aset->ReplaceInput(equivalent, /* input_index */ 2);
+ aset->ReplaceInput(equivalent, /* index= */ 2);
if (equivalent->IsPhi()) {
// Returned equivalent is a phi which may not have had its inputs
// replaced yet. We need to run primitive type propagation on it.
@@ -415,85 +418,36 @@ bool SsaBuilder::FixAmbiguousArrayOps() {
return true;
}
-static bool HasAliasInEnvironments(HInstruction* instruction) {
- HEnvironment* last_user = nullptr;
+bool SsaBuilder::HasAliasInEnvironments(HInstruction* instruction) {
+ ScopedArenaHashSet<size_t> seen_users(
+ local_allocator_->Adapter(kArenaAllocGraphBuilder));
for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
DCHECK(use.GetUser() != nullptr);
- // Note: The first comparison (== null) always fails.
- if (use.GetUser() == last_user) {
+ size_t id = use.GetUser()->GetHolder()->GetId();
+ if (seen_users.find(id) != seen_users.end()) {
return true;
}
- last_user = use.GetUser();
- }
-
- if (kIsDebugBuild) {
- // Do a quadratic search to ensure same environment uses are next
- // to each other.
- const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
- for (auto current = env_uses.begin(), end = env_uses.end(); current != end; ++current) {
- auto next = current;
- for (++next; next != end; ++next) {
- DCHECK(next->GetUser() != current->GetUser());
- }
- }
+ seen_users.insert(id);
}
return false;
}
-void SsaBuilder::ReplaceUninitializedStringPhis() {
- ScopedArenaHashSet<HInstruction*> seen_instructions(
- local_allocator_->Adapter(kArenaAllocGraphBuilder));
- ScopedArenaVector<HInstruction*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
-
- // Iterate over all inputs and uses of the phi, recursively, until all related instructions
- // have been visited.
- for (const auto& pair : uninitialized_string_phis_) {
- HPhi* string_phi = pair.first;
- HInvoke* invoke = pair.second;
- worklist.push_back(string_phi);
- HNewInstance* found_instance = nullptr;
- do {
- HInstruction* current = worklist.back();
- worklist.pop_back();
- if (seen_instructions.find(current) != seen_instructions.end()) {
- continue;
- }
- seen_instructions.insert(current);
- if (current->IsNewInstance()) {
- // If it is the first time we see the allocation, replace its uses. We don't register
- // it through `RemoveRedundantUninitializedStrings`, as that method makes assumption about
- // aliasing and environment uses that don't hold when the string escapes to phis.
- // Note that this also means we will keep the (useless) allocation.
- if (found_instance == nullptr) {
- found_instance = current->AsNewInstance();
- } else {
- DCHECK(found_instance == current);
- }
- } else if (current->IsPhi()) {
- // Push all inputs to the worklist. Those should be Phis or NewInstance.
- for (HInstruction* input : current->GetInputs()) {
- DCHECK(input->IsPhi() || input->IsNewInstance()) << input->DebugName();
- worklist.push_back(input);
- }
- } else {
- // The verifier prevents any other DEX uses of the uninitialized string.
- DCHECK(current->IsEqual() || current->IsNotEqual());
- continue;
- }
- current->ReplaceUsesDominatedBy(invoke, invoke);
- current->ReplaceEnvUsesDominatedBy(invoke, invoke);
- // Push all users to the worklist. Now that we have replaced
- // the uses dominated by the invokes, the remaining users should only
- // be Phi, or Equal/NotEqual.
- for (const HUseListNode<HInstruction*>& use : current->GetUses()) {
- HInstruction* user = use.GetUser();
- DCHECK(user->IsPhi() || user->IsEqual() || user->IsNotEqual()) << user->DebugName();
- worklist.push_back(user);
- }
- } while (!worklist.empty());
- seen_instructions.clear();
- DCHECK(found_instance != nullptr);
+bool SsaBuilder::ReplaceUninitializedStringPhis() {
+ for (HInvoke* invoke : uninitialized_string_phis_) {
+ HInstruction* str = invoke->InputAt(invoke->InputCount() - 1);
+ if (str->IsPhi()) {
+ // If, after redundant phi and dead phi elimination, it's still a phi that feeds
+ // the invoke, then we must be compiling a method with irreducible loops. Just bail.
+ DCHECK(graph_->HasIrreducibleLoops());
+ return false;
+ }
+ DCHECK(str->IsNewInstance());
+ AddUninitializedString(str->AsNewInstance());
+ str->ReplaceUsesDominatedBy(invoke, invoke);
+ str->ReplaceEnvUsesDominatedBy(invoke, invoke);
+ invoke->RemoveInputAt(invoke->InputCount() - 1);
}
+ return true;
}
void SsaBuilder::RemoveRedundantUninitializedStrings() {
@@ -508,8 +462,9 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() {
DCHECK(new_instance->IsStringAlloc());
// Replace NewInstance of String with NullConstant if not used prior to
- // calling StringFactory. In case of deoptimization, the interpreter is
- // expected to skip null check on the `this` argument of the StringFactory call.
+ // calling StringFactory. We check for alias environments in case of deoptimization.
+ // The interpreter is expected to skip null check on the `this` argument of the
+ // StringFactory call.
if (!new_instance->HasNonEnvironmentUses() && !HasAliasInEnvironments(new_instance)) {
new_instance->ReplaceWith(graph_->GetNullConstant());
new_instance->GetBlock()->RemoveInstruction(new_instance);
@@ -544,11 +499,6 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() {
GraphAnalysisResult SsaBuilder::BuildSsa() {
DCHECK(!graph_->IsInSsaForm());
- // Replace Phis that feed in a String.<init>, as well as their aliases, with
- // the actual String allocation invocation. We do this first, as the phis stored in
- // the data structure might get removed from the graph in later stages during `BuildSsa`.
- ReplaceUninitializedStringPhis();
-
// Propagate types of phis. At this point, phis are typed void in the general
// case, or float/double/reference if we created an equivalent phi. So we need
// to propagate the types across phis to give them a correct type. If a type
@@ -575,7 +525,7 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
class_loader_,
dex_cache_,
handles_,
- /* is_first_run */ true).Run();
+ /* is_first_run= */ true).Run();
// HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
// (int/float or long/double) and marked ArraySets with ambiguous input type.
@@ -607,6 +557,14 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
// input types.
dead_phi_elimimation.EliminateDeadPhis();
+ // Replace Phis that feed in a String.<init> during instruction building. We
+ // run this after redundant and dead phi elimination to make sure the phi will have
+ // been replaced by the actual allocation. Only with an irreducible loop
+ // can a phi still be the input, in which case we bail.
+ if (!ReplaceUninitializedStringPhis()) {
+ return kAnalysisFailIrreducibleLoopAndStringInit;
+ }
+
// HInstructionBuilder replaced uses of NewInstances of String with the
// results of their corresponding StringFactory calls. Unless the String
// objects are used before they are initialized, they can be replaced with
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 765544508e..bb892c9304 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -97,8 +97,8 @@ class SsaBuilder : public ValueObject {
}
}
- void AddUninitializedStringPhi(HPhi* phi, HInvoke* invoke) {
- uninitialized_string_phis_.push_back(std::make_pair(phi, invoke));
+ void AddUninitializedStringPhi(HInvoke* invoke) {
+ uninitialized_string_phis_.push_back(invoke);
}
private:
@@ -123,7 +123,8 @@ class SsaBuilder : public ValueObject {
HArrayGet* GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget);
void RemoveRedundantUninitializedStrings();
- void ReplaceUninitializedStringPhis();
+ bool ReplaceUninitializedStringPhis();
+ bool HasAliasInEnvironments(HInstruction* instruction);
HGraph* const graph_;
Handle<mirror::ClassLoader> class_loader_;
@@ -137,7 +138,7 @@ class SsaBuilder : public ValueObject {
ScopedArenaVector<HArrayGet*> ambiguous_agets_;
ScopedArenaVector<HArraySet*> ambiguous_asets_;
ScopedArenaVector<HNewInstance*> uninitialized_strings_;
- ScopedArenaVector<std::pair<HPhi*, HInvoke*>> uninitialized_string_phis_;
+ ScopedArenaVector<HInvoke*> uninitialized_string_phis_;
DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
};
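The HasAliasInEnvironments rewrite in the ssa_builder.cc hunk above replaces the adjacent-user comparison with a membership test on a set keyed by the environment holder's instruction id. A stand-alone sketch of that duplicate-detection pattern, using std::unordered_set instead of the arena-backed container for brevity:

    #include <unordered_set>
    #include <vector>

    // Returns true if any user id occurs more than once in the use list.
    static bool HasDuplicateUser(const std::vector<size_t>& user_ids) {
      std::unordered_set<size_t> seen;
      for (size_t id : user_ids) {
        if (!seen.insert(id).second) {
          return true;  // second insertion of the same id: aliasing user found
        }
      }
      return false;
    }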
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 62a70d6b12..7b2c3a939c 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -120,7 +120,7 @@ void SsaLivenessAnalysis::RecursivelyProcessInputs(HInstruction* current,
DCHECK(input->HasSsaIndex());
// `input` generates a result used by `current`. Add use and update
// the live-in set.
- input->GetLiveInterval()->AddUse(current, /* environment */ nullptr, i, actual_user);
+ input->GetLiveInterval()->AddUse(current, /* environment= */ nullptr, i, actual_user);
live_in->SetBit(input->GetSsaIndex());
} else if (has_out_location) {
// `input` generates a result but it is not used by `current`.
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 92d0b08301..c88390775c 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1155,10 +1155,11 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
*
* (a) Non-environment uses of an instruction always make
* the instruction live.
- * (b) Environment uses of an instruction whose type is
- * object (that is, non-primitive), make the instruction live.
- * This is due to having to keep alive objects that have
- * finalizers deleting native objects.
+ * (b) Environment uses of an instruction whose type is object (that is, non-primitive) make the
+ * instruction live, unless the class has a @DeadReferenceSafe annotation.
+ * This avoids unexpected premature reference enqueuing or finalization, which could
+ * result in premature deletion of native objects. In the presence of @DeadReferenceSafe,
+ * object references are treated like primitive types.
* (c) When the graph has the debuggable property, environment uses
* of an instruction that has a primitive type make the instruction live.
* If the graph does not have the debuggable property, the environment
@@ -1287,6 +1288,7 @@ class SsaLivenessAnalysis : public ValueObject {
// When compiling in OSR mode, all loops in the compiled method may be entered
// from the interpreter via SuspendCheck; thus we need to preserve the environment.
if (env_holder->IsSuspendCheck() && graph->IsCompilingOsr()) return true;
+ if (graph->IsDeadReferenceSafe()) return false;
return instruction->GetType() == DataType::Type::kReference;
}
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 4b525531da..352c44f63a 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -94,25 +94,25 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
null_check);
null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
block->AddInstruction(length);
- HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u);
+ HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc= */ 0u);
block->AddInstruction(bounds_check);
HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
bounds_check);
bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
bounds_check->SetRawEnvironment(bounds_check_env);
HInstruction* array_set =
- new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
@@ -163,9 +163,9 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
block->AddInstruction(null_check);
HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
null_check);
null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
null_check->SetRawEnvironment(null_check_env);
@@ -175,17 +175,17 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length);
block->AddInstruction(ae);
HInstruction* deoptimize = new(GetAllocator()) HDeoptimize(
- GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
+ GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc= */ 0u);
block->AddInstruction(deoptimize);
HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(),
- /* number_of_vregs */ 5,
- /* method */ nullptr,
- /* dex_pc */ 0u,
+ /* number_of_vregs= */ 5,
+ /* method= */ nullptr,
+ /* dex_pc= */ 0u,
deoptimize);
deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
deoptimize->SetRawEnvironment(deoptimize_env);
HInstruction* array_set =
- new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+ new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
block->AddInstruction(array_set);
graph_->BuildDominatorTree();
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 5370f43b4f..3fcb72e4fb 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -141,7 +141,7 @@ bool SsaRedundantPhiElimination::Run() {
ArenaBitVector visited_phis_in_cycle(&allocator,
graph_->GetCurrentInstructionId(),
- /* expandable */ false,
+ /* expandable= */ false,
kArenaAllocSsaPhiElimination);
visited_phis_in_cycle.ClearAllBits();
ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
diff --git a/compiler/optimizing/superblock_cloner.h b/compiler/optimizing/superblock_cloner.h
index f21172131b..dbe9008e92 100644
--- a/compiler/optimizing/superblock_cloner.h
+++ b/compiler/optimizing/superblock_cloner.h
@@ -372,8 +372,8 @@ class PeelUnrollHelper : public ValueObject {
// Returns whether the loop can be peeled/unrolled.
bool IsLoopClonable() const { return cloner_.IsSubgraphClonable(); }
- HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll */ false); }
- HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll */ true); }
+ HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll= */ false); }
+ HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll= */ true); }
HLoopInformation* GetRegionToBeAdjusted() const { return cloner_.GetRegionToBeAdjusted(); }
protected:
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index ebb631e33c..77f5d7081a 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -91,7 +91,7 @@ void ArmVIXLAssembler::GenerateMarkingRegisterCheck(vixl32::Register temp, int c
___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
// Check that mr == self.tls32_.is.gc_marking.
___ Cmp(mr, temp);
- ___ B(eq, &mr_is_ok, /* far_target */ false);
+ ___ B(eq, &mr_is_ok, /* is_far_target= */ false);
___ Bkpt(code);
___ Bind(&mr_is_ok);
}
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index c83fd4404a..d7ade058a4 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "arch/arm64/instruction_set_features_arm64.h"
#include "assembler_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
@@ -31,6 +32,37 @@ namespace arm64 {
#define ___ vixl_masm_.
#endif
+// Sets vixl::CPUFeatures according to ART instruction set features.
+static void SetVIXLCPUFeaturesFromART(vixl::aarch64::MacroAssembler* vixl_masm_,
+ const Arm64InstructionSetFeatures* art_features) {
+ // Retrieve already initialized default features of vixl.
+ vixl::CPUFeatures* features = vixl_masm_->GetCPUFeatures();
+
+ DCHECK(features->Has(vixl::CPUFeatures::kFP));
+ DCHECK(features->Has(vixl::CPUFeatures::kNEON));
+ DCHECK(art_features != nullptr);
+ if (art_features->HasCRC()) {
+ features->Combine(vixl::CPUFeatures::kCRC32);
+ }
+ if (art_features->HasDotProd()) {
+ features->Combine(vixl::CPUFeatures::kDotProduct);
+ }
+ if (art_features->HasFP16()) {
+ features->Combine(vixl::CPUFeatures::kFPHalf);
+ }
+ if (art_features->HasLSE()) {
+ features->Combine(vixl::CPUFeatures::kAtomics);
+ }
+}
+
+Arm64Assembler::Arm64Assembler(ArenaAllocator* allocator,
+ const Arm64InstructionSetFeatures* art_features)
+ : Assembler(allocator) {
+ if (art_features != nullptr) {
+ SetVIXLCPUFeaturesFromART(&vixl_masm_, art_features);
+ }
+}
+
void Arm64Assembler::FinalizeCode() {
___ FinalizeCode();
}
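
For illustration, a minimal sketch of how the new feature-aware constructor might be used; the variant string, pool type, and factory calls below are assumptions for illustration and are not part of this change:

    // Build ART features for a concrete core and pass them through, so that
    // VIXL accepts e.g. CRC32 / dot-product instructions when emitting code.
    std::string error_msg;
    std::unique_ptr<const InstructionSetFeatures> features(
        InstructionSetFeatures::FromVariant(InstructionSet::kArm64, "cortex-a75", &error_msg));
    MallocArenaPool pool;
    ArenaAllocator allocator(&pool);
    arm64::Arm64Assembler assembler(&allocator, features->AsArm64InstructionSetFeatures());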
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 74537dd5a3..fdecab8251 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -37,6 +37,9 @@
#pragma GCC diagnostic pop
namespace art {
+
+class Arm64InstructionSetFeatures;
+
namespace arm64 {
#define MEM_OP(...) vixl::aarch64::MemOperand(__VA_ARGS__)
@@ -63,7 +66,8 @@ enum StoreOperandType {
class Arm64Assembler final : public Assembler {
public:
- explicit Arm64Assembler(ArenaAllocator* allocator) : Assembler(allocator) {}
+ explicit Arm64Assembler(
+ ArenaAllocator* allocator, const Arm64InstructionSetFeatures* features = nullptr);
virtual ~Arm64Assembler() {}
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 096410de3e..05372251dc 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -295,7 +295,7 @@ class DebugFrameOpCodeWriterForAssembler final
void ImplicitlyAdvancePC() final;
explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
- : dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
+ : dwarf::DebugFrameOpCodeWriter<>(/* enabled= */ false),
assembler_(buffer),
delay_emitting_advance_pc_(false),
delayed_advance_pcs_() {
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index 778a01566c..5fa0b3cd39 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -59,12 +59,12 @@ class AssemblerTestInfrastructure {
disassembler_cmd_name_(disasm),
disassembler_parameters_(disasm_params) {
// Fake a runtime test for ScratchFile
- CommonRuntimeTest::SetUpAndroidData(android_data_);
+ CommonRuntimeTest::SetUpAndroidDataDir(android_data_);
}
virtual ~AssemblerTestInfrastructure() {
// We leave temporaries in case this failed so we can debug issues.
- CommonRuntimeTest::TearDownAndroidData(android_data_, false);
+ CommonRuntimeTest::TearDownAndroidDataDir(android_data_, false);
tmpnam_ = "";
}
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 053e202523..c9ece1df69 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -125,7 +125,7 @@ void DumpAndCheck(std::vector<uint8_t>& code, const char* testname, const char*
// Assemble the .S
snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
int cmd_result = system(cmd);
- ASSERT_EQ(cmd_result, 0) << strerror(errno);
+ ASSERT_EQ(cmd_result, 0) << cmd << strerror(errno);
// Disassemble.
snprintf(cmd, sizeof(cmd), "%sobjdump -D -M force-thumb --section=.text %s.o | grep '^ *[0-9a-f][0-9a-f]*:'",
@@ -239,7 +239,7 @@ TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
__ Load(scratch_register, FrameOffset(4092), 4);
__ Load(scratch_register, FrameOffset(4096), 4);
__ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
- __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference */ false);
+ __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference= */ false);
// Stores
__ Store(FrameOffset(32), method_register, 4);
@@ -284,7 +284,7 @@ TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
__ DecreaseFrameSize(4096);
__ DecreaseFrameSize(32);
- __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend */ true);
+ __ RemoveFrame(frame_size, callee_save_regs, /* may_suspend= */ true);
EmitAndCheck(&assembler, "VixlJniHelpers");
}
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 85e4326494..0d279ede19 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -76,7 +76,7 @@ const char* const VixlJniHelpersResults[] = {
" f0: f1bc 0f00 cmp.w ip, #0\n",
" f4: bf18 it ne\n",
" f6: f20d 4c01 addwne ip, sp, #1025 ; 0x401\n",
- " fa: f8d9 c08c ldr.w ip, [r9, #140] ; 0x8c\n",
+ " fa: f8d9 c094 ldr.w ip, [r9, #148] ; 0x94\n",
" fe: f1bc 0f00 cmp.w ip, #0\n",
" 102: d171 bne.n 1e8 <VixlJniHelpers+0x1e8>\n",
" 104: f8cd c7ff str.w ip, [sp, #2047] ; 0x7ff\n",
@@ -153,7 +153,7 @@ const char* const VixlJniHelpersResults[] = {
" 21c: f8d9 8034 ldr.w r8, [r9, #52] ; 0x34\n",
" 220: 4770 bx lr\n",
" 222: 4660 mov r0, ip\n",
- " 224: f8d9 c2d4 ldr.w ip, [r9, #724] ; 0x2d4\n",
+ " 224: f8d9 c2dc ldr.w ip, [r9, #732] ; 0x2dc\n",
" 228: 47e0 blx ip\n",
nullptr
};
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index c0b6f988d4..a9d1a25530 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -179,7 +179,7 @@ void MipsAssembler::PatchCFI(size_t number_of_delayed_adjust_pcs) {
return;
}
- typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
+ using DelayedAdvancePC = DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC;
const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
const std::vector<uint8_t>& old_stream = data.first;
const std::vector<DelayedAdvancePC>& advances = data.second;
@@ -463,7 +463,7 @@ void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16, MipsLabel* p
}
void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
- Addiu(rt, rs, imm16, /* patcher_label */ nullptr);
+ Addiu(rt, rs, imm16, /* patcher_label= */ nullptr);
}
void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
@@ -732,7 +732,7 @@ void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16, MipsLabel* patc
}
void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) {
- Lw(rt, rs, imm16, /* patcher_label */ nullptr);
+ Lw(rt, rs, imm16, /* patcher_label= */ nullptr);
}
void MipsAssembler::Lwl(Register rt, Register rs, uint16_t imm16) {
@@ -814,7 +814,7 @@ void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16, MipsLabel* patc
}
void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) {
- Sw(rt, rs, imm16, /* patcher_label */ nullptr);
+ Sw(rt, rs, imm16, /* patcher_label= */ nullptr);
}
void MipsAssembler::Swl(Register rt, Register rs, uint16_t imm16) {
@@ -3610,7 +3610,7 @@ void MipsAssembler::FinalizeLabeledBranch(MipsLabel* label) {
label->LinkTo(branch_id);
}
// Reserve space for the branch.
- while (length--) {
+ for (; length != 0u; --length) {
Nop();
}
}
@@ -3755,7 +3755,7 @@ void MipsAssembler::MoveInstructionToDelaySlot(Branch& branch) {
void MipsAssembler::Buncond(MipsLabel* label, bool is_r6, bool is_bare) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call */ false, is_bare);
+ branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ false, is_bare);
MoveInstructionToDelaySlot(branches_.back());
FinalizeLabeledBranch(label);
}
@@ -3778,7 +3778,7 @@ void MipsAssembler::Bcond(MipsLabel* label,
void MipsAssembler::Call(MipsLabel* label, bool is_r6, bool is_bare) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call */ true, is_bare);
+ branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ true, is_bare);
MoveInstructionToDelaySlot(branches_.back());
FinalizeLabeledBranch(label);
}
@@ -4300,43 +4300,43 @@ void MipsAssembler::EmitBranch(uint32_t branch_id) {
}
void MipsAssembler::B(MipsLabel* label, bool is_bare) {
- Buncond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare);
+ Buncond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
}
void MipsAssembler::Bal(MipsLabel* label, bool is_bare) {
- Call(label, /* is_r6 */ (IsR6() && !is_bare), is_bare);
+ Call(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
}
void MipsAssembler::Beq(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondEQ, rs, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQ, rs, rt);
}
void MipsAssembler::Bne(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondNE, rs, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNE, rs, rt);
}
void MipsAssembler::Beqz(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondEQZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQZ, rt);
}
void MipsAssembler::Bnez(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondNEZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNEZ, rt);
}
void MipsAssembler::Bltz(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondLTZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLTZ, rt);
}
void MipsAssembler::Bgez(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondGEZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGEZ, rt);
}
void MipsAssembler::Blez(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondLEZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLEZ, rt);
}
void MipsAssembler::Bgtz(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ (IsR6() && !is_bare), is_bare, kCondGTZ, rt);
+ Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGTZ, rt);
}
bool MipsAssembler::CanExchangeWithSlt(Register rs, Register rt) const {
@@ -4392,7 +4392,7 @@ void MipsAssembler::Blt(Register rs, Register rt, MipsLabel* label, bool is_bare
Bcond(label, IsR6(), is_bare, kCondLT, rs, rt);
} else if (!Branch::IsNop(kCondLT, rs, rt)) {
// Synthesize the instruction (not available on R2).
- GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
Bnez(AT, label, is_bare);
}
}
@@ -4404,7 +4404,7 @@ void MipsAssembler::Bge(Register rs, Register rt, MipsLabel* label, bool is_bare
B(label, is_bare);
} else {
// Synthesize the instruction (not available on R2).
- GenerateSltForCondBranch(/* unsigned_slt */ false, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
Beqz(AT, label, is_bare);
}
}
@@ -4414,7 +4414,7 @@ void MipsAssembler::Bltu(Register rs, Register rt, MipsLabel* label, bool is_bar
Bcond(label, IsR6(), is_bare, kCondLTU, rs, rt);
} else if (!Branch::IsNop(kCondLTU, rs, rt)) {
// Synthesize the instruction (not available on R2).
- GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
Bnez(AT, label, is_bare);
}
}
@@ -4426,7 +4426,7 @@ void MipsAssembler::Bgeu(Register rs, Register rt, MipsLabel* label, bool is_bar
B(label, is_bare);
} else {
// Synthesize the instruction (not available on R2).
- GenerateSltForCondBranch(/* unsigned_slt */ true, rs, rt);
+ GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
Beqz(AT, label, is_bare);
}
}
@@ -4437,7 +4437,7 @@ void MipsAssembler::Bc1f(MipsLabel* label, bool is_bare) {
void MipsAssembler::Bc1f(int cc, MipsLabel* label, bool is_bare) {
CHECK(IsUint<3>(cc)) << cc;
- Bcond(label, /* is_r6 */ false, is_bare, kCondF, static_cast<Register>(cc), ZERO);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondF, static_cast<Register>(cc), ZERO);
}
void MipsAssembler::Bc1t(MipsLabel* label, bool is_bare) {
@@ -4446,71 +4446,71 @@ void MipsAssembler::Bc1t(MipsLabel* label, bool is_bare) {
void MipsAssembler::Bc1t(int cc, MipsLabel* label, bool is_bare) {
CHECK(IsUint<3>(cc)) << cc;
- Bcond(label, /* is_r6 */ false, is_bare, kCondT, static_cast<Register>(cc), ZERO);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondT, static_cast<Register>(cc), ZERO);
}
void MipsAssembler::Bc(MipsLabel* label, bool is_bare) {
- Buncond(label, /* is_r6 */ true, is_bare);
+ Buncond(label, /* is_r6= */ true, is_bare);
}
void MipsAssembler::Balc(MipsLabel* label, bool is_bare) {
- Call(label, /* is_r6 */ true, is_bare);
+ Call(label, /* is_r6= */ true, is_bare);
}
void MipsAssembler::Beqc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondEQ, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
}
void MipsAssembler::Bnec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondNE, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
}
void MipsAssembler::Beqzc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondEQZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rt);
}
void MipsAssembler::Bnezc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondNEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rt);
}
void MipsAssembler::Bltzc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLTZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
}
void MipsAssembler::Bgezc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
}
void MipsAssembler::Blezc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
}
void MipsAssembler::Bgtzc(Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGTZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
}
void MipsAssembler::Bltc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLT, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
}
void MipsAssembler::Bgec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGE, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
}
void MipsAssembler::Bltuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLTU, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
}
void MipsAssembler::Bgeuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGEU, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
}
void MipsAssembler::Bc1eqz(FRegister ft, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondF, static_cast<Register>(ft), ZERO);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<Register>(ft), ZERO);
}
void MipsAssembler::Bc1nez(FRegister ft, MipsLabel* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondT, static_cast<Register>(ft), ZERO);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<Register>(ft), ZERO);
}
void MipsAssembler::AdjustBaseAndOffset(Register& base,
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 8a1e1df777..69189a49aa 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -862,7 +862,7 @@ class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSi
// We permit `base` and `temp` to coincide (however, we check that neither is AT),
// in which case the `base` register may be overwritten in the process.
CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
uint32_t low = Low32Bits(value);
uint32_t high = High32Bits(value);
Register reg;
@@ -917,7 +917,7 @@ class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSi
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
switch (type) {
case kLoadSignedByte:
Lb(reg, base, offset);
@@ -960,7 +960,7 @@ class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSi
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
Lwc1(reg, base, offset);
null_checker();
}
@@ -970,7 +970,7 @@ class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSi
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
if (IsAligned<kMipsDoublewordSize>(offset)) {
Ldc1(reg, base, offset);
null_checker();
@@ -1016,7 +1016,7 @@ class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSi
// Must not use AT as `reg`, so as not to overwrite the value being stored
// with the adjusted `base`.
CHECK_NE(reg, AT);
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
switch (type) {
case kStoreByte:
Sb(reg, base, offset);
@@ -1047,7 +1047,7 @@ class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSi
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false, /* is_float */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
Swc1(reg, base, offset);
null_checker();
}
@@ -1057,7 +1057,7 @@ class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSi
Register base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ true, /* is_float */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
if (IsAligned<kMipsDoublewordSize>(offset)) {
Sdc1(reg, base, offset);
null_checker();
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
index f9919f52b5..98fc44ba5d 100644
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ b/compiler/utils/mips/assembler_mips32r5_test.cc
@@ -38,12 +38,12 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
uint32_t,
mips::VectorRegister> {
public:
- typedef AssemblerTest<mips::MipsAssembler,
- mips::MipsLabel,
- mips::Register,
- mips::FRegister,
- uint32_t,
- mips::VectorRegister> Base;
+ using Base = AssemblerTest<mips::MipsAssembler,
+ mips::MipsLabel,
+ mips::Register,
+ mips::FRegister,
+ uint32_t,
+ mips::VectorRegister>;
// These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
// and reimplement it without the verification against `assembly_string`. b/73903608
@@ -229,7 +229,7 @@ class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
STLDeleteElements(&vec_registers_);
}
- std::vector<mips::MipsLabel> GetAddresses() {
+ std::vector<mips::MipsLabel> GetAddresses() override {
UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
UNREACHABLE();
}
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index 1ec7a6a3e0..4e27bbf28d 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -38,12 +38,12 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
uint32_t,
mips::VectorRegister> {
public:
- typedef AssemblerTest<mips::MipsAssembler,
- mips::MipsLabel,
- mips::Register,
- mips::FRegister,
- uint32_t,
- mips::VectorRegister> Base;
+ using Base = AssemblerTest<mips::MipsAssembler,
+ mips::MipsLabel,
+ mips::Register,
+ mips::FRegister,
+ uint32_t,
+ mips::VectorRegister>;
// These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
// and reimplement it without the verification against `assembly_string`. b/73903608
@@ -242,7 +242,7 @@ class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
STLDeleteElements(&vec_registers_);
}
- std::vector<mips::MipsLabel> GetAddresses() {
+ std::vector<mips::MipsLabel> GetAddresses() override {
UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
UNREACHABLE();
}
@@ -1078,11 +1078,11 @@ TEST_F(AssemblerMIPS32r6Test, StoreQToOffset) {
//////////////
TEST_F(AssemblerMIPS32r6Test, Bc) {
- BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot */ false);
+ BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false);
}
TEST_F(AssemblerMIPS32r6Test, Balc) {
- BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot */ false);
+ BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false);
}
TEST_F(AssemblerMIPS32r6Test, Beqc) {
@@ -1142,11 +1142,11 @@ TEST_F(AssemblerMIPS32r6Test, Bc1nez) {
}
TEST_F(AssemblerMIPS32r6Test, B) {
- BranchHelper(&mips::MipsAssembler::B, "Bc", /* has_slot */ false);
+ BranchHelper(&mips::MipsAssembler::B, "Bc", /* has_slot= */ false);
}
TEST_F(AssemblerMIPS32r6Test, Bal) {
- BranchHelper(&mips::MipsAssembler::Bal, "Balc", /* has_slot */ false);
+ BranchHelper(&mips::MipsAssembler::Bal, "Balc", /* has_slot= */ false);
}
TEST_F(AssemblerMIPS32r6Test, Beq) {
@@ -1198,123 +1198,123 @@ TEST_F(AssemblerMIPS32r6Test, Bgeu) {
}
TEST_F(AssemblerMIPS32r6Test, BareBc) {
- BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot */ false, /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false, /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBalc) {
- BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot */ false, /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false, /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBeqc) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBnec) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBeqzc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBnezc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltzc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgezc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBlezc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgtzc) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltc) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgec) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltuc) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgeuc) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBc1eqz) {
- BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz", /* is_bare */ true);
+ BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBc1nez) {
- BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez", /* is_bare */ true);
+ BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareB) {
- BranchHelper(&mips::MipsAssembler::B, "B", /* has_slot */ true, /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::B, "B", /* has_slot= */ true, /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBal) {
- BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* has_slot */ true, /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* has_slot= */ true, /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBeq) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBne) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBeqz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBnez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBlez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgtz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBlt) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBge) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBltu) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, BareBgeu) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS32r6Test, LongBeqc) {
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 9527fa6ddd..c0894d309e 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -37,11 +37,11 @@ class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
mips::FRegister,
uint32_t> {
public:
- typedef AssemblerTest<mips::MipsAssembler,
- mips::MipsLabel,
- mips::Register,
- mips::FRegister,
- uint32_t> Base;
+ using Base = AssemblerTest<mips::MipsAssembler,
+ mips::MipsLabel,
+ mips::Register,
+ mips::FRegister,
+ uint32_t>;
// These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
// and reimplement it without the verification against `assembly_string`. b/73903608
@@ -176,7 +176,7 @@ class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
STLDeleteElements(&fp_registers_);
}
- std::vector<mips::MipsLabel> GetAddresses() {
+ std::vector<mips::MipsLabel> GetAddresses() override {
UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
UNREACHABLE();
}
@@ -2241,67 +2241,67 @@ TEST_F(AssemblerMIPSTest, Bc1t) {
}
TEST_F(AssemblerMIPSTest, BareB) {
- BranchHelper(&mips::MipsAssembler::B, "B", /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::B, "B", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBal) {
- BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* is_bare */ true);
+ BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBeq) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBne) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBeqz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBnez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBltz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBgez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBlez) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBgtz) {
- BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBlt) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBge) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBltu) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBgeu) {
- BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBc1f) {
- BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f", /* is_bare */ true);
+ BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, BareBc1t) {
- BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t", /* is_bare */ true);
+ BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t", /* is_bare= */ true);
}
TEST_F(AssemblerMIPSTest, ImpossibleReordering) {
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 5b1c5d9e01..70313ca093 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -52,7 +52,7 @@ void Mips64Assembler::PatchCFI() {
return;
}
- typedef DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC DelayedAdvancePC;
+ using DelayedAdvancePC = DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC;
const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
const std::vector<uint8_t>& old_stream = data.first;
const std::vector<DelayedAdvancePC>& advances = data.second;
@@ -2455,7 +2455,7 @@ Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target, bool is_call
condition_(kUncond) {
InitializeType(
(is_call ? (is_bare ? kBareCall : kCall) : (is_bare ? kBareCondBranch : kCondBranch)),
- /* is_r6 */ true);
+ /* is_r6= */ true);
}
Mips64Assembler::Branch::Branch(bool is_r6,
@@ -2516,7 +2516,7 @@ Mips64Assembler::Branch::Branch(uint32_t location, GpuRegister dest_reg, Type la
rhs_reg_(ZERO),
condition_(kUncond) {
CHECK_NE(dest_reg, ZERO);
- InitializeType(label_or_literal_type, /* is_r6 */ true);
+ InitializeType(label_or_literal_type, /* is_r6= */ true);
}
Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
@@ -2889,14 +2889,14 @@ void Mips64Assembler::FinalizeLabeledBranch(Mips64Label* label) {
label->LinkTo(branch_id);
}
// Reserve space for the branch.
- while (length--) {
+ for (; length != 0u; --length) {
Nop();
}
}
void Mips64Assembler::Buncond(Mips64Label* label, bool is_bare) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(buffer_.Size(), target, /* is_call */ false, is_bare);
+ branches_.emplace_back(buffer_.Size(), target, /* is_call= */ false, is_bare);
FinalizeLabeledBranch(label);
}
@@ -2917,7 +2917,7 @@ void Mips64Assembler::Bcond(Mips64Label* label,
void Mips64Assembler::Call(Mips64Label* label, bool is_bare) {
uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
- branches_.emplace_back(buffer_.Size(), target, /* is_call */ true, is_bare);
+ branches_.emplace_back(buffer_.Size(), target, /* is_call= */ true, is_bare);
FinalizeLabeledBranch(label);
}
@@ -3278,99 +3278,99 @@ void Mips64Assembler::Balc(Mips64Label* label, bool is_bare) {
}
void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLT, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
}
void Mips64Assembler::Bltzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLTZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
}
void Mips64Assembler::Bgtzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGTZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
}
void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGE, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
}
void Mips64Assembler::Bgezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
}
void Mips64Assembler::Blezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLEZ, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
}
void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondLTU, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
}
void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondGEU, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
}
void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondEQ, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
}
void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondNE, rs, rt);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
}
void Mips64Assembler::Beqzc(GpuRegister rs, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondEQZ, rs);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rs);
}
void Mips64Assembler::Bnezc(GpuRegister rs, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondNEZ, rs);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rs);
}
void Mips64Assembler::Bc1eqz(FpuRegister ft, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondF, static_cast<GpuRegister>(ft), ZERO);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<GpuRegister>(ft), ZERO);
}
void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label, bool is_bare) {
- Bcond(label, /* is_r6 */ true, is_bare, kCondT, static_cast<GpuRegister>(ft), ZERO);
+ Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<GpuRegister>(ft), ZERO);
}
void Mips64Assembler::Bltz(GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondLTZ, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondLTZ, rt);
}
void Mips64Assembler::Bgtz(GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondGTZ, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondGTZ, rt);
}
void Mips64Assembler::Bgez(GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondGEZ, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondGEZ, rt);
}
void Mips64Assembler::Blez(GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondLEZ, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondLEZ, rt);
}
void Mips64Assembler::Beq(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondEQ, rs, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondEQ, rs, rt);
}
void Mips64Assembler::Bne(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondNE, rs, rt);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondNE, rs, rt);
}
void Mips64Assembler::Beqz(GpuRegister rs, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondEQZ, rs);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondEQZ, rs);
}
void Mips64Assembler::Bnez(GpuRegister rs, Mips64Label* label, bool is_bare) {
CHECK(is_bare);
- Bcond(label, /* is_r6 */ false, is_bare, kCondNEZ, rs);
+ Bcond(label, /* is_r6= */ false, is_bare, kCondNEZ, rs);
}
void Mips64Assembler::AdjustBaseAndOffset(GpuRegister& base,
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index ce447db4fb..2f991e92c5 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -1058,7 +1058,7 @@ class Mips64Assembler final : public Assembler, public JNIMacroAssembler<Pointer
// We permit `base` and `temp` to coincide (however, we check that neither is AT),
// in which case the `base` register may be overwritten in the process.
CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
GpuRegister reg;
// If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp`
// to load and hold the value but we can use AT instead as AT hasn't been used yet.
@@ -1127,7 +1127,7 @@ class Mips64Assembler final : public Assembler, public JNIMacroAssembler<Pointer
GpuRegister base,
int32_t offset,
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
switch (type) {
case kLoadSignedByte:
@@ -1178,7 +1178,7 @@ class Mips64Assembler final : public Assembler, public JNIMacroAssembler<Pointer
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
int element_size_shift = -1;
if (type != kLoadQuadword) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
} else {
AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
}
@@ -1226,7 +1226,7 @@ class Mips64Assembler final : public Assembler, public JNIMacroAssembler<Pointer
// Must not use AT as `reg`, so as not to overwrite the value being stored
// with the adjusted `base`.
CHECK_NE(reg, AT);
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
switch (type) {
case kStoreByte:
@@ -1267,7 +1267,7 @@ class Mips64Assembler final : public Assembler, public JNIMacroAssembler<Pointer
ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
int element_size_shift = -1;
if (type != kStoreQuadword) {
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
+ AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
} else {
AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
}
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 4ceb356910..499e8f4e15 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -41,12 +41,12 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
uint32_t,
mips64::VectorRegister> {
public:
- typedef AssemblerTest<mips64::Mips64Assembler,
- mips64::Mips64Label,
- mips64::GpuRegister,
- mips64::FpuRegister,
- uint32_t,
- mips64::VectorRegister> Base;
+ using Base = AssemblerTest<mips64::Mips64Assembler,
+ mips64::Mips64Label,
+ mips64::GpuRegister,
+ mips64::FpuRegister,
+ uint32_t,
+ mips64::VectorRegister>;
// These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
// and reimplement it without the verification against `assembly_string`. b/73903608
@@ -240,7 +240,7 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
STLDeleteElements(&vec_registers_);
}
- std::vector<mips64::Mips64Label> GetAddresses() {
+ std::vector<mips64::Mips64Label> GetAddresses() override {
UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
UNREACHABLE();
}
@@ -852,99 +852,99 @@ TEST_F(AssemblerMIPS64Test, Bc1nez) {
}
TEST_F(AssemblerMIPS64Test, BareBc) {
- BranchHelper(&mips64::Mips64Assembler::Bc, "Bc", /* is_bare */ true);
+ BranchHelper(&mips64::Mips64Assembler::Bc, "Bc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBalc) {
- BranchHelper(&mips64::Mips64Assembler::Balc, "Balc", /* is_bare */ true);
+ BranchHelper(&mips64::Mips64Assembler::Balc, "Balc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBeqzc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBnezc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBltzc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgezc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBlezc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgtzc) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBeqc) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBnec) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBltc) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgec) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBltuc) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgeuc) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBc1eqz) {
- BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz", /* is_bare */ true);
+ BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBc1nez) {
- BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez", /* is_bare */ true);
+ BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBeqz) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqz, "Beqz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqz, "Beqz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBnez) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnez, "Bnez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnez, "Bnez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBltz) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltz, "Bltz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltz, "Bltz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgez) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgez, "Bgez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgez, "Bgez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBlez) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Blez, "Blez", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Blez, "Blez", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBgtz) {
- BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtz, "Bgtz", /* is_bare */ true);
+ BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtz, "Bgtz", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBeq) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beq, "Beq", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beq, "Beq", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, BareBne) {
- BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bne, "Bne", /* is_bare */ true);
+ BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bne, "Bne", /* is_bare= */ true);
}
TEST_F(AssemblerMIPS64Test, LongBeqc) {
@@ -1252,7 +1252,7 @@ TEST_F(AssemblerMIPS64Test, Daui) {
std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters();
std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters();
reg2_registers.erase(reg2_registers.begin()); // reg2 can't be ZERO, remove it.
- std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits */ 16, /* as_uint */ true);
+ std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits= */ 16, /* as_uint= */ true);
WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size());
std::ostringstream expected;
for (mips64::GpuRegister* reg1 : reg1_registers) {
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 86f9010ea3..4b073bde0b 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -59,6 +59,98 @@ std::ostream& operator<<(std::ostream& os, const Address& addr) {
}
}
+uint8_t X86Assembler::EmitVexByteZero(bool is_two_byte) {
+ uint8_t vex_zero = 0xC0;
+ if (!is_two_byte) {
+ vex_zero |= 0xC4;
+ } else {
+ vex_zero |= 0xC5;
+ }
+ return vex_zero;
+}
+
+uint8_t X86Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm) {
+ // VEX Byte 1
+ uint8_t vex_prefix = 0;
+ if (!r) {
+ vex_prefix |= 0x80; // VEX.R
+ }
+ if (!x) {
+ vex_prefix |= 0x40; // VEX.X
+ }
+ if (!b) {
+ vex_prefix |= 0x20; // VEX.B
+ }
+
+ // VEX.mmmmm
+ switch (mmmmm) {
+ case 1:
+ // implied 0F leading opcode byte
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // implied leading 0F 38 opcode byte
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // implied leading 0F 3A opcode byte
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown opcode bytes";
+ }
+ return vex_prefix;
+}
+
+uint8_t X86Assembler::EmitVexByte2(bool w, int l, X86ManagedRegister operand, int pp) {
+ uint8_t vex_prefix = 0;
+ // VEX Byte 2
+ if (w) {
+ vex_prefix |= 0x80;
+ }
+ // VEX.vvvv
+ if (operand.IsXmmRegister()) {
+ XmmRegister vvvv = operand.AsXmmRegister();
+ int inverted_reg = 15 - static_cast<int>(vvvv);
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ } else if (operand.IsCpuRegister()) {
+ Register vvvv = operand.AsCpuRegister();
+ int inverted_reg = 15 - static_cast<int>(vvvv);
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ }
+
+ // VEX.L
+ if (l == 256) {
+ vex_prefix |= 0x04;
+ }
+
+ // VEX.pp
+ switch (pp) {
+ case 0:
+ // SIMD Prefix - None
+ vex_prefix |= 0x00;
+ break;
+ case 1:
+ // SIMD Prefix - 66
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // SIMD Prefix - F3
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // SIMD Prefix - F2
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown SIMD Prefix";
+ }
+
+ return vex_prefix;
+}
+
void X86Assembler::call(Register reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xFF);
@@ -179,6 +271,60 @@ void X86Assembler::movntl(const Address& dst, Register src) {
EmitOperand(src, dst);
}
+void X86Assembler::blsi(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ /*b=*/ false,
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+ /*l=*/ 128,
+ X86ManagedRegister::FromCpuRegister(dst),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(3, src);
+}
+
+void X86Assembler::blsmsk(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ /*b=*/ false,
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+ /*l=*/ 128,
+ X86ManagedRegister::FromCpuRegister(dst),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(2, src);
+}
+
+void X86Assembler::blsr(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ /*b=*/ false,
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+ /*l=*/ 128,
+ X86ManagedRegister::FromCpuRegister(dst),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(1, src);
+}
+
void X86Assembler::bswapl(Register dst) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
@@ -1267,6 +1413,25 @@ void X86Assembler::pand(XmmRegister dst, XmmRegister src) {
EmitXmmRegisterOperand(dst, src);
}
+void X86Assembler::andn(Register dst, Register src1, Register src2) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ /*b=*/ false,
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ false,
+ /*l=*/ 128,
+ X86ManagedRegister::FromCpuRegister(src1),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field
+ EmitUint8(0xF2);
+ EmitRegisterOperand(dst, src2);
+}
+
void X86Assembler::andnpd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1986,7 +2151,7 @@ void X86Assembler::cmpb(const Address& address, const Immediate& imm) {
void X86Assembler::cmpw(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
- EmitComplex(7, address, imm, /* is_16_op */ true);
+ EmitComplex(7, address, imm, /* is_16_op= */ true);
}
@@ -2176,7 +2341,7 @@ void X86Assembler::addw(const Address& address, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
EmitUint8(0x66);
- EmitComplex(0, address, imm, /* is_16_op */ true);
+ EmitComplex(0, address, imm, /* is_16_op= */ true);
}
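
For reference, a hand-derived encoding sketch of what the VEX helpers above produce for two of the new instructions; the bytes were computed from the helper logic and should be read as an illustration, not an authoritative statement of the patch:

    // andn eax, ebx, ecx
    //   C4   EmitVexByteZero(/*is_two_byte=*/ false)              three-byte VEX escape
    //   E2   EmitVexByte1(r/x/b = false, mmmmm = 2)               ~R ~X ~B = 111, 0F 38 map
    //   60   EmitVexByte2(w = false, l = 128, vvvv = ~EBX, pp = 0)
    //   F2   opcode
    //   C1   ModRM: mod = 11, reg = EAX (dst), rm = ECX (src2)
    //
    // blsi eax, ecx
    //   C4 E2 78 F3 D9   same prefix scheme; vvvv = ~EAX (dst), opcode F3, /3 selects BLSI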
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 5ac9236d6b..275e5c1234 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -337,6 +337,10 @@ class X86Assembler final : public Assembler {
void movntl(const Address& dst, Register src);
+ void blsi(Register dst, Register src); // no addr variant (for now)
+ void blsmsk(Register dst, Register src); // no addr variant (for now)
+ void blsr(Register dst, Register src); // no addr variant (for now)
+
void bswapl(Register dst);
void bsfl(Register dst, Register src);
@@ -500,6 +504,7 @@ class X86Assembler final : public Assembler {
void andps(XmmRegister dst, const Address& src);
void pand(XmmRegister dst, XmmRegister src); // no addr variant (for now)
+ void andn(Register dst, Register src1, Register src2); // no addr variant (for now)
void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void andnps(XmmRegister dst, XmmRegister src);
void pandn(XmmRegister dst, XmmRegister src);
@@ -837,6 +842,11 @@ class X86Assembler final : public Assembler {
void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
void EmitGenericShift(int rm, const Operand& operand, Register shifter);
+ // Emit a 3-byte VEX prefix.
+ uint8_t EmitVexByteZero(bool is_two_byte);
+ uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
+ uint8_t EmitVexByte2(bool w, int l, X86ManagedRegister operand, int pp);
+
ConstantArea constant_area_;
DISALLOW_COPY_AND_ASSIGN(X86Assembler);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index b03c40aa3e..1d8bfe7fa7 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -44,11 +44,11 @@ class AssemblerX86Test : public AssemblerTest<x86::X86Assembler,
x86::XmmRegister,
x86::Immediate> {
public:
- typedef AssemblerTest<x86::X86Assembler,
- x86::Address,
- x86::Register,
- x86::XmmRegister,
- x86::Immediate> Base;
+ using Base = AssemblerTest<x86::X86Assembler,
+ x86::Address,
+ x86::Register,
+ x86::XmmRegister,
+ x86::Immediate>;
protected:
std::string GetArchitectureString() override {
@@ -349,6 +349,18 @@ TEST_F(AssemblerX86Test, RepMovsw) {
DriverStr(expected, "rep_movsw");
}
+TEST_F(AssemblerX86Test, Blsmask) {
+ DriverStr(RepeatRR(&x86::X86Assembler::blsmsk, "blsmsk %{reg2}, %{reg1}"), "blsmsk");
+}
+
+TEST_F(AssemblerX86Test, Blsi) {
+ DriverStr(RepeatRR(&x86::X86Assembler::blsi, "blsi %{reg2}, %{reg1}"), "blsi");
+}
+
+TEST_F(AssemblerX86Test, Blsr) {
+ DriverStr(RepeatRR(&x86::X86Assembler::blsr, "blsr %{reg2}, %{reg1}"), "blsr");
+}
+
TEST_F(AssemblerX86Test, Bsfl) {
DriverStr(RepeatRR(&x86::X86Assembler::bsfl, "bsfl %{reg2}, %{reg1}"), "bsfl");
}
@@ -657,6 +669,10 @@ TEST_F(AssemblerX86Test, PAnd) {
DriverStr(RepeatFF(&x86::X86Assembler::pand, "pand %{reg2}, %{reg1}"), "pand");
}
+TEST_F(AssemblerX86Test, Andn) {
+ DriverStr(RepeatRRR(&x86::X86Assembler::andn, "andn %{reg3}, %{reg2}, %{reg1}"), "andn");
+}
+
TEST_F(AssemblerX86Test, AndnPD) {
DriverStr(RepeatFF(&x86::X86Assembler::andnpd, "andnpd %{reg2}, %{reg1}"), "andnpd");
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index bd31561937..c118bc6fbe 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -64,6 +64,99 @@ std::ostream& operator<<(std::ostream& os, const Address& addr) {
}
}
+uint8_t X86_64Assembler::EmitVexByteZero(bool is_two_byte) {
+ uint8_t vex_zero = 0xC0;
+ if (!is_two_byte) {
+ vex_zero |= 0xC4;
+ } else {
+ vex_zero |= 0xC5;
+ }
+ return vex_zero;
+}
+
+uint8_t X86_64Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm) {
+ // VEX Byte 1
+ uint8_t vex_prefix = 0;
+ if (!r) {
+ vex_prefix |= 0x80; // VEX.R
+ }
+ if (!x) {
+ vex_prefix |= 0x40; // VEX.X
+ }
+ if (!b) {
+ vex_prefix |= 0x20; // VEX.B
+ }
+
+ // VEX.mmmmm
+ switch (mmmmm) {
+ case 1:
+ // implied 0F leading opcode byte
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // implied leading 0F 38 opcode byte
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // implied leading 0F 3A opcode byte
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown opcode bytes";
+ }
+
+ return vex_prefix;
+}
+
+uint8_t X86_64Assembler::EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp) {
+ // VEX Byte 2
+ uint8_t vex_prefix = 0;
+ if (w) {
+ vex_prefix |= 0x80;
+ }
+ // VEX.vvvv
+ if (operand.IsXmmRegister()) {
+ XmmRegister vvvv = operand.AsXmmRegister();
+ int inverted_reg = 15 - static_cast<int>(vvvv.AsFloatRegister());
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ } else if (operand.IsCpuRegister()) {
+ CpuRegister vvvv = operand.AsCpuRegister();
+ int inverted_reg = 15 - static_cast<int>(vvvv.AsRegister());
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ }
+
+ // VEX.L
+ if (l == 256) {
+ vex_prefix |= 0x04;
+ }
+
+ // VEX.pp
+ switch (pp) {
+ case 0:
+ // SIMD Prefix - None
+ vex_prefix |= 0x00;
+ break;
+ case 1:
+ // SIMD Prefix - 66
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // SIMD Prefix - F3
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // SIMD Prefix - F2
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown SIMD Prefix";
+ }
+
+ return vex_prefix;
+}
+
void X86_64Assembler::call(CpuRegister reg) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(reg);
@@ -1483,6 +1576,25 @@ void X86_64Assembler::pand(XmmRegister dst, XmmRegister src) {
EmitXmmRegisterOperand(dst.LowBits(), src);
}
+void X86_64Assembler::andn(CpuRegister dst, CpuRegister src1, CpuRegister src2) {
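+ // BMI1 ANDN: dst = ~src1 & src2. VEX-encoded (opcode 0F 38 F2 /r); w=true below selects the 64-bit operand size.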
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(dst.NeedsRex(),
+ /*x=*/ false,
+ src2.NeedsRex(),
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+ /*l=*/ 128,
+ X86_64ManagedRegister::FromCpuRegister(src1.AsRegister()),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field
+ EmitUint8(0xF2);
+ EmitRegisterOperand(dst.LowBits(), src2.LowBits());
+}
+
void X86_64Assembler::andnpd(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x66);
@@ -2279,7 +2391,7 @@ void X86_64Assembler::cmpw(const Address& address, const Immediate& imm) {
CHECK(imm.is_int32());
EmitOperandSizeOverride();
EmitOptionalRex32(address);
- EmitComplex(7, address, imm, /* is_16_op */ true);
+ EmitComplex(7, address, imm, /* is_16_op= */ true);
}
@@ -2693,7 +2805,7 @@ void X86_64Assembler::addw(const Address& address, const Immediate& imm) {
CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
EmitUint8(0x66);
EmitOptionalRex32(address);
- EmitComplex(0, address, imm, /* is_16_op */ true);
+ EmitComplex(0, address, imm, /* is_16_op= */ true);
}
@@ -3260,6 +3372,60 @@ void X86_64Assembler::setcc(Condition condition, CpuRegister dst) {
EmitUint8(0xC0 + dst.LowBits());
}
+void X86_64Assembler::blsi(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ src.NeedsRex(),
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+ /*l=*/ 128,
+ X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(3, src.LowBits());
+}
+
+void X86_64Assembler::blsmsk(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ src.NeedsRex(),
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+ /*l=*/ 128,
+ X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(2, src.LowBits());
+}
+
+void X86_64Assembler::blsr(CpuRegister dst, CpuRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
+ uint8_t byte_one = EmitVexByte1(/*r=*/ false,
+ /*x=*/ false,
+ src.NeedsRex(),
+ /*mmmmm=*/ 2);
+ uint8_t byte_two = EmitVexByte2(/*w=*/ true,
+ /*l=*/ 128,
+ X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+ /*pp=*/ 0);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xF3);
+ EmitRegisterOperand(1, src.LowBits());
+}
+
void X86_64Assembler::bswapl(CpuRegister dst) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex(false, false, false, false, dst.NeedsRex());
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index e696635e62..ff13ea3293 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -543,6 +543,7 @@ class X86_64Assembler final : public Assembler {
void andps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void pand(XmmRegister dst, XmmRegister src);
+ void andn(CpuRegister dst, CpuRegister src1, CpuRegister src2);
void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void andnps(XmmRegister dst, XmmRegister src);
void pandn(XmmRegister dst, XmmRegister src);
@@ -796,6 +797,10 @@ class X86_64Assembler final : public Assembler {
void bsfq(CpuRegister dst, CpuRegister src);
void bsfq(CpuRegister dst, const Address& src);
+ void blsi(CpuRegister dst, CpuRegister src); // no addr variant (for now)
+ void blsmsk(CpuRegister dst, CpuRegister src); // no addr variant (for now)
+ void blsr(CpuRegister dst, CpuRegister src); // no addr variant (for now)
+
void bsrl(CpuRegister dst, CpuRegister src);
void bsrl(CpuRegister dst, const Address& src);
void bsrq(CpuRegister dst, CpuRegister src);
@@ -951,6 +956,11 @@ class X86_64Assembler final : public Assembler {
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand& operand);
+ // Emit a 3-byte VEX prefix.
+ uint8_t EmitVexByteZero(bool is_two_byte);
+ uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
+ uint8_t EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp);
+
ConstantArea constant_area_;
DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index e1de1f172f..461f028d9a 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -137,11 +137,11 @@ class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler,
x86_64::XmmRegister,
x86_64::Immediate> {
public:
- typedef AssemblerTest<x86_64::X86_64Assembler,
- x86_64::Address,
- x86_64::CpuRegister,
- x86_64::XmmRegister,
- x86_64::Immediate> Base;
+ using Base = AssemblerTest<x86_64::X86_64Assembler,
+ x86_64::Address,
+ x86_64::CpuRegister,
+ x86_64::XmmRegister,
+ x86_64::Immediate>;
protected:
// Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
@@ -297,7 +297,7 @@ class AssemblerX86_64Test : public AssemblerTest<x86_64::X86_64Assembler,
STLDeleteElements(&fp_registers_);
}
- std::vector<x86_64::Address> GetAddresses() {
+ std::vector<x86_64::Address> GetAddresses() override {
return addresses_;
}
@@ -1414,7 +1414,9 @@ TEST_F(AssemblerX86_64Test, Andpd) {
TEST_F(AssemblerX86_64Test, Pand) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::pand, "pand %{reg2}, %{reg1}"), "pand");
}
+TEST_F(AssemblerX86_64Test, Andn) {
+ DriverStr(RepeatRRR(&x86_64::X86_64Assembler::andn, "andn %{reg3}, %{reg2}, %{reg1}"), "andn");
+}
+
TEST_F(AssemblerX86_64Test, andnpd) {
DriverStr(RepeatFF(&x86_64::X86_64Assembler::andnpd, "andnpd %{reg2}, %{reg1}"), "andnpd");
}
@@ -1785,6 +1787,18 @@ TEST_F(AssemblerX86_64Test, RetAndLeave) {
DriverFn(&ret_and_leave_fn, "retleave");
}
+TEST_F(AssemblerX86_64Test, Blsmask) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::blsmsk, "blsmsk %{reg2}, %{reg1}"), "blsmsk");
+}
+
+TEST_F(AssemblerX86_64Test, Blsi) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::blsi, "blsi %{reg2}, %{reg1}"), "blsi");
+}
+
+TEST_F(AssemblerX86_64Test, Blsr) {
+ DriverStr(RepeatRR(&x86_64::X86_64Assembler::blsr, "blsr %{reg2}, %{reg1}"), "blsr");
+}
+
TEST_F(AssemblerX86_64Test, Bswapl) {
DriverStr(Repeatr(&x86_64::X86_64Assembler::bswapl, "bswap %{reg}"), "bswapl");
}
@@ -2080,7 +2094,7 @@ std::string removeframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_tes
ArrayRef<const ManagedRegister> spill_regs(raw_spill_regs);
size_t frame_size = 10 * kStackAlignment;
- assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend */ true);
+ assembler->RemoveFrame(frame_size, spill_regs, /* may_suspend= */ true);
// Construct assembly text counterpart.
std::ostringstream str;
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 81932a90ce..092e931944 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -49,10 +49,9 @@ class VerifierDepsCompilerCallbacks : public CompilerCallbacks {
void MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) override {}
void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) override {}
- bool IsRelocationPossible() override { return false; }
verifier::VerifierDeps* GetVerifierDeps() const override { return deps_; }
- void SetVerifierDeps(verifier::VerifierDeps* deps) { deps_ = deps; }
+ void SetVerifierDeps(verifier::VerifierDeps* deps) override { deps_ = deps; }
private:
verifier::VerifierDeps* deps_;
@@ -60,7 +59,7 @@ class VerifierDepsCompilerCallbacks : public CompilerCallbacks {
class VerifierDepsTest : public CommonCompilerTest {
public:
- void SetUpRuntimeOptions(RuntimeOptions* options) {
+ void SetUpRuntimeOptions(RuntimeOptions* options) override {
CommonCompilerTest::SetUpRuntimeOptions(options);
callbacks_.reset(new VerifierDepsCompilerCallbacks());
}
@@ -80,26 +79,26 @@ class VerifierDepsTest : public CommonCompilerTest {
}
void SetupCompilerDriver() {
- compiler_options_->boot_image_ = false;
+ compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
compiler_driver_->InitializeThreadPools();
}
- void VerifyWithCompilerDriver(verifier::VerifierDeps* deps) {
+ void VerifyWithCompilerDriver(verifier::VerifierDeps* verifier_deps) {
TimingLogger timings("Verify", false, false);
// The compiler driver handles the verifier deps in the callbacks, so
// remove what this class did for unit testing.
- if (deps == nullptr) {
+ if (verifier_deps == nullptr) {
// Create some verifier deps by default if they are not already specified.
- deps = new verifier::VerifierDeps(dex_files_);
- verifier_deps_.reset(deps);
+ verifier_deps = new verifier::VerifierDeps(dex_files_);
+ verifier_deps_.reset(verifier_deps);
}
- callbacks_->SetVerifierDeps(deps);
- compiler_driver_->Verify(class_loader_, dex_files_, &timings);
+ callbacks_->SetVerifierDeps(verifier_deps);
+ compiler_driver_->Verify(class_loader_, dex_files_, &timings, verification_results_.get());
callbacks_->SetVerifierDeps(nullptr);
// Clear entries in the verification results to avoid hitting a DCHECK that
// we always succeed inserting a new entry after verifying.
AtomicDexRefMap<MethodReference, const VerifiedMethod*>* map =
- &compiler_driver_->GetVerificationResults()->atomic_verified_methods_;
+ &verification_results_->atomic_verified_methods_;
map->Visit([](const DexFileReference& ref ATTRIBUTE_UNUSED, const VerifiedMethod* method) {
delete method;
});
@@ -127,7 +126,7 @@ class VerifierDepsTest : public CommonCompilerTest {
class_linker_->RegisterDexFile(*dex_file, loader.Get());
}
for (const DexFile* dex_file : dex_files_) {
- compiler_driver_->GetVerificationResults()->AddDexFile(dex_file);
+ verification_results_->AddDexFile(dex_file);
}
SetDexFilesForOatFile(dex_files_);
}
@@ -148,7 +147,7 @@ class VerifierDepsTest : public CommonCompilerTest {
hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader_)));
Handle<mirror::DexCache> dex_cache_handle(hs.NewHandle(klass_Main_->GetDexCache()));
- const DexFile::ClassDef* class_def = klass_Main_->GetClassDef();
+ const dex::ClassDef* class_def = klass_Main_->GetClassDef();
ClassAccessor accessor(*primary_dex_file_, *class_def);
bool has_failures = true;
@@ -160,7 +159,7 @@ class VerifierDepsTest : public CommonCompilerTest {
method.GetIndex(),
dex_cache_handle,
class_loader_handle,
- /* referrer */ nullptr,
+ /* referrer= */ nullptr,
method.GetInvokeType(class_def->access_flags_));
CHECK(resolved_method != nullptr);
if (method_name == resolved_method->GetName()) {
@@ -174,12 +173,12 @@ class VerifierDepsTest : public CommonCompilerTest {
method.GetIndex(),
resolved_method,
method.GetAccessFlags(),
- true /* can_load_classes */,
- true /* allow_soft_failures */,
- true /* need_precise_constants */,
- false /* verify to dump */,
- true /* allow_thread_suspension */,
- 0 /* api_level */);
+ /* can_load_classes= */ true,
+ /* allow_soft_failures= */ true,
+ /* need_precise_constants= */ true,
+ /* verify_to_dump= */ false,
+ /* allow_thread_suspension= */ true,
+ /* api_level= */ 0);
verifier.Verify();
soa.Self()->SetVerifierDeps(nullptr);
has_failures = verifier.HasFailures();
@@ -196,7 +195,7 @@ class VerifierDepsTest : public CommonCompilerTest {
LoadDexFile(soa, "VerifierDeps", multidex);
}
SetupCompilerDriver();
- VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+ VerifyWithCompilerDriver(/* verifier_deps= */ nullptr);
}
bool TestAssignabilityRecording(const std::string& dst,
@@ -229,7 +228,7 @@ class VerifierDepsTest : public CommonCompilerTest {
for (const DexFile* dex_file : dex_files_) {
const std::set<dex::TypeIndex>& unverified_classes = deps.GetUnverifiedClasses(*dex_file);
for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const dex::ClassDef& class_def = dex_file->GetClassDef(i);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
cls.Assign(class_linker_->FindClass(soa.Self(), descriptor, class_loader_handle));
if (cls == nullptr) {
@@ -251,7 +250,7 @@ class VerifierDepsTest : public CommonCompilerTest {
}
bool HasUnverifiedClass(const std::string& cls, const DexFile& dex_file) {
- const DexFile::TypeId* type_id = dex_file.FindTypeId(cls.c_str());
+ const dex::TypeId* type_id = dex_file.FindTypeId(cls.c_str());
DCHECK(type_id != nullptr);
dex::TypeIndex index = dex_file.GetIndexForTypeId(*type_id);
for (const auto& dex_dep : verifier_deps_->dex_deps_) {
@@ -330,7 +329,7 @@ class VerifierDepsTest : public CommonCompilerTest {
continue;
}
- const DexFile::FieldId& field_id = dex_dep.first->GetFieldId(entry.GetDexFieldIndex());
+ const dex::FieldId& field_id = dex_dep.first->GetFieldId(entry.GetDexFieldIndex());
std::string actual_klass = dex_dep.first->StringByTypeIdx(field_id.class_idx_);
if (expected_klass != actual_klass) {
@@ -373,16 +372,16 @@ class VerifierDepsTest : public CommonCompilerTest {
bool HasMethod(const std::string& expected_klass,
const std::string& expected_name,
const std::string& expected_signature,
- bool expected_resolved,
+ bool expect_resolved,
const std::string& expected_access_flags = "",
const std::string& expected_decl_klass = "") {
for (auto& dex_dep : verifier_deps_->dex_deps_) {
for (const VerifierDeps::MethodResolution& entry : dex_dep.second->methods_) {
- if (expected_resolved != entry.IsResolved()) {
+ if (expect_resolved != entry.IsResolved()) {
continue;
}
- const DexFile::MethodId& method_id = dex_dep.first->GetMethodId(entry.GetDexMethodIndex());
+ const dex::MethodId& method_id = dex_dep.first->GetMethodId(entry.GetDexMethodIndex());
std::string actual_klass = dex_dep.first->StringByTypeIdx(method_id.class_idx_);
if (expected_klass != actual_klass) {
@@ -399,7 +398,7 @@ class VerifierDepsTest : public CommonCompilerTest {
continue;
}
- if (expected_resolved) {
+ if (expect_resolved) {
// Test access flags. Note that PrettyJavaAccessFlags always appends
// a space after the modifiers. Add it to the expected access flags.
std::string actual_access_flags = PrettyJavaAccessFlags(entry.GetAccessFlags());
@@ -424,7 +423,7 @@ class VerifierDepsTest : public CommonCompilerTest {
return verifier_deps_->dex_deps_.size();
}
- size_t HasEachKindOfRecord() {
+ bool HasEachKindOfRecord() {
bool has_strings = false;
bool has_assignability = false;
bool has_classes = false;
@@ -483,42 +482,42 @@ TEST_F(VerifierDepsTest, StringToId) {
}
TEST_F(VerifierDepsTest, Assignable_BothInBoot) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;",
- /* src */ "Ljava/util/SimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/TimeZone;",
+ /* src= */ "Ljava/util/SimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
}
TEST_F(VerifierDepsTest, Assignable_DestinationInBoot1) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/net/Socket;",
- /* src */ "LMySSLSocket;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/net/Socket;",
+ /* src= */ "LMySSLSocket;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
ASSERT_TRUE(HasAssignable("Ljava/net/Socket;", "Ljavax/net/ssl/SSLSocket;", true));
}
TEST_F(VerifierDepsTest, Assignable_DestinationInBoot2) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/TimeZone;",
- /* src */ "LMySimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/TimeZone;",
+ /* src= */ "LMySimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
}
TEST_F(VerifierDepsTest, Assignable_DestinationInBoot3) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/util/Collection;",
- /* src */ "LMyThreadSet;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/util/Collection;",
+ /* src= */ "LMyThreadSet;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
ASSERT_TRUE(HasAssignable("Ljava/util/Collection;", "Ljava/util/Set;", true));
}
TEST_F(VerifierDepsTest, Assignable_BothArrays_Resolved) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[[Ljava/util/TimeZone;",
- /* src */ "[[Ljava/util/SimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[[Ljava/util/TimeZone;",
+ /* src= */ "[[Ljava/util/SimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ true));
// If the component types of both arrays are resolved, we optimize the list of
// dependencies by recording a dependency on the component types.
ASSERT_FALSE(HasAssignable("[[Ljava/util/TimeZone;", "[[Ljava/util/SimpleTimeZone;", true));
@@ -527,34 +526,34 @@ TEST_F(VerifierDepsTest, Assignable_BothArrays_Resolved) {
}
TEST_F(VerifierDepsTest, NotAssignable_BothInBoot) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
- /* src */ "Ljava/util/SimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+ /* src= */ "Ljava/util/SimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
}
TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot1) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
- /* src */ "LMySSLSocket;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+ /* src= */ "LMySSLSocket;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljavax/net/ssl/SSLSocket;", false));
}
TEST_F(VerifierDepsTest, NotAssignable_DestinationInBoot2) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
- /* src */ "LMySimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+ /* src= */ "LMySimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
}
TEST_F(VerifierDepsTest, NotAssignable_BothArrays) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[Ljava/lang/Exception;",
- /* src */ "[Ljava/util/SimpleTimeZone;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[Ljava/lang/Exception;",
+ /* src= */ "[Ljava/util/SimpleTimeZone;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "Ljava/util/SimpleTimeZone;", false));
}
@@ -590,7 +589,7 @@ TEST_F(VerifierDepsTest, InvokeArgumentType) {
ASSERT_TRUE(HasMethod("Ljava/text/SimpleDateFormat;",
"setTimeZone",
"(Ljava/util/TimeZone;)V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/text/DateFormat;"));
ASSERT_TRUE(HasAssignable("Ljava/util/TimeZone;", "Ljava/util/SimpleTimeZone;", true));
@@ -825,7 +824,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInReferenced) {
ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
"setSocketImplFactory",
"(Ljava/net/SocketImplFactory;)V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public static",
"Ljava/net/Socket;"));
}
@@ -836,7 +835,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInSuperclass1) {
ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
"setSocketImplFactory",
"(Ljava/net/SocketImplFactory;)V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public static",
"Ljava/net/Socket;"));
}
@@ -846,7 +845,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInSuperclass2) {
ASSERT_TRUE(HasMethod("LMySSLSocket;",
"setSocketImplFactory",
"(Ljava/net/SocketImplFactory;)V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public static",
"Ljava/net/Socket;"));
}
@@ -857,7 +856,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_DeclaredInInterface1) {
ASSERT_TRUE(HasMethod("Ljava/util/Map$Entry;",
"comparingByKey",
"()Ljava/util/Comparator;",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public static",
"Ljava/util/Map$Entry;"));
}
@@ -868,7 +867,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_DeclaredInInterface2) {
ASSERT_TRUE(HasMethod("Ljava/util/AbstractMap$SimpleEntry;",
"comparingByKey",
"()Ljava/util/Comparator;",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeStatic_Unresolved1) {
@@ -877,7 +876,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_Unresolved1) {
ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeStatic_Unresolved2) {
@@ -885,7 +884,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_Unresolved2) {
ASSERT_TRUE(HasMethod("LMySSLSocket;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInReferenced) {
@@ -894,7 +893,7 @@ TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInReferenced) {
ASSERT_TRUE(HasMethod("Ljava/net/Socket;",
"<init>",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/net/Socket;"));
}
@@ -905,7 +904,7 @@ TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInSuperclass1) {
ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
"checkOldImpl",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"private",
"Ljava/net/Socket;"));
}
@@ -915,7 +914,7 @@ TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInSuperclass2) {
ASSERT_TRUE(HasMethod("LMySSLSocket;",
"checkOldImpl",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"private",
"Ljava/net/Socket;"));
}
@@ -926,7 +925,7 @@ TEST_F(VerifierDepsTest, InvokeDirect_Unresolved1) {
ASSERT_TRUE(HasMethod("Ljavax/net/ssl/SSLSocket;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeDirect_Unresolved2) {
@@ -934,7 +933,7 @@ TEST_F(VerifierDepsTest, InvokeDirect_Unresolved2) {
ASSERT_TRUE(HasMethod("LMySSLSocket;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInReferenced) {
@@ -943,7 +942,7 @@ TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInReferenced) {
ASSERT_TRUE(HasMethod("Ljava/lang/Throwable;",
"getMessage",
"()Ljava/lang/String;",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Throwable;"));
// Type dependency on `this` argument.
@@ -956,7 +955,7 @@ TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperclass1) {
ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
"getMessage",
"()Ljava/lang/String;",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Throwable;"));
// Type dependency on `this` argument.
@@ -968,7 +967,7 @@ TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperclass2) {
ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
"getMessage",
"()Ljava/lang/String;",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Throwable;"));
}
@@ -978,7 +977,7 @@ TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperinterface) {
ASSERT_TRUE(HasMethod("LMyThreadSet;",
"size",
"()I",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/util/Set;"));
}
@@ -989,7 +988,7 @@ TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved1) {
ASSERT_TRUE(HasMethod("Ljava/io/InterruptedIOException;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved2) {
@@ -997,7 +996,7 @@ TEST_F(VerifierDepsTest, InvokeVirtual_Unresolved2) {
ASSERT_TRUE(HasMethod("LMySocketTimeoutException;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInReferenced) {
@@ -1006,7 +1005,7 @@ TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInReferenced) {
ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
"run",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Runnable;"));
}
@@ -1017,7 +1016,7 @@ TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperclass) {
ASSERT_TRUE(HasMethod("LMyThread;",
"join",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Thread;"));
}
@@ -1028,7 +1027,7 @@ TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperinterface1) {
ASSERT_TRUE(HasMethod("LMyThreadSet;",
"run",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Thread;"));
}
@@ -1038,7 +1037,7 @@ TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperinterface2) {
ASSERT_TRUE(HasMethod("LMyThreadSet;",
"isEmpty",
"()Z",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/util/Set;"));
}
@@ -1049,12 +1048,12 @@ TEST_F(VerifierDepsTest, InvokeInterface_Unresolved1) {
ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
"x",
"()V",
- /* expect_resolved */ false));
+ /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeInterface_Unresolved2) {
ASSERT_FALSE(VerifyMethod("InvokeInterface_Unresolved2"));
- ASSERT_TRUE(HasMethod("LMyThreadSet;", "x", "()V", /* expect_resolved */ false));
+ ASSERT_TRUE(HasMethod("LMyThreadSet;", "x", "()V", /* expect_resolved= */ false));
}
TEST_F(VerifierDepsTest, InvokeSuper_ThisAssignable) {
@@ -1064,7 +1063,7 @@ TEST_F(VerifierDepsTest, InvokeSuper_ThisAssignable) {
ASSERT_TRUE(HasMethod("Ljava/lang/Runnable;",
"run",
"()V",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public",
"Ljava/lang/Runnable;"));
}
@@ -1075,7 +1074,7 @@ TEST_F(VerifierDepsTest, InvokeSuper_ThisNotAssignable) {
ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "Ljava/lang/Thread;", false));
ASSERT_TRUE(HasMethod("Ljava/lang/Integer;",
"intValue", "()I",
- /* expect_resolved */ true,
+ /* expect_resolved= */ true,
"public", "Ljava/lang/Integer;"));
}
@@ -1444,7 +1443,7 @@ TEST_F(VerifierDepsTest, CompilerDriver) {
ScopedObjectAccess soa(Thread::Current());
LoadDexFile(soa, "VerifierDeps", multi);
}
- VerifyWithCompilerDriver(/* verifier_deps */ nullptr);
+ VerifyWithCompilerDriver(/* verifier_deps= */ nullptr);
std::vector<uint8_t> buffer;
verifier_deps_->Encode(dex_files_, &buffer);
@@ -1494,22 +1493,22 @@ TEST_F(VerifierDepsTest, MultiDexVerification) {
}
TEST_F(VerifierDepsTest, NotAssignable_InterfaceWithClassInBoot) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;",
- /* src */ "LIface;",
- /* is_strict */ true,
- /* is_assignable */ false));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "Ljava/lang/Exception;",
+ /* src= */ "LIface;",
+ /* is_strict= */ true,
+ /* is_assignable= */ false));
ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LIface;", false));
}
TEST_F(VerifierDepsTest, Assignable_Arrays) {
- ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "[LIface;",
- /* src */ "[LMyClassExtendingInterface;",
- /* is_strict */ false,
- /* is_assignable */ true));
+ ASSERT_TRUE(TestAssignabilityRecording(/* dst= */ "[LIface;",
+ /* src= */ "[LMyClassExtendingInterface;",
+ /* is_strict= */ false,
+ /* is_assignable= */ true));
ASSERT_FALSE(HasAssignable(
- "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable */ true));
+ "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable= */ true));
ASSERT_FALSE(HasAssignable(
- "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable */ false));
+ "LIface;", "LMyClassExtendingInterface;", /* expected_is_assignable= */ false));
}
} // namespace verifier