Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.bp                                  |    2
-rw-r--r--  compiler/common_compiler_test.cc                     |    5
-rw-r--r--  compiler/common_compiler_test.h                      |    1
-rw-r--r--  compiler/debug/elf_debug_info_writer.h               |    6
-rw-r--r--  compiler/debug/elf_debug_line_writer.h               |    5
-rw-r--r--  compiler/driver/compiled_method_storage_test.cc      |    3
-rw-r--r--  compiler/driver/compiler_driver.cc                   |   41
-rw-r--r--  compiler/driver/compiler_driver.h                    |   20
-rw-r--r--  compiler/driver/compiler_options.cc                  |    2
-rw-r--r--  compiler/driver/compiler_options.h                   |   10
-rw-r--r--  compiler/driver/compiler_options_map-inl.h           |   14
-rw-r--r--  compiler/driver/compiler_options_map.def             |    2
-rw-r--r--  compiler/jit/jit_compiler.cc                         |    4
-rw-r--r--  compiler/jit/jit_compiler.h                          |    1
-rw-r--r--  compiler/jni/quick/jni_compiler.cc                   |   33
-rw-r--r--  compiler/jni/quick/jni_compiler.h                    |   51
-rw-r--r--  compiler/linker/method_bss_mapping_encoder.h         |   79
-rw-r--r--  compiler/linker/method_bss_mapping_encoder_test.cc   |   50
-rw-r--r--  compiler/linker/relative_patcher_test.h              |    3
-rw-r--r--  compiler/optimizing/code_generator.cc                |    4
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc          |  101
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc       |   86
-rw-r--r--  compiler/optimizing/code_generator_mips.cc           |  131
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc         |  131
-rw-r--r--  compiler/optimizing/code_generator_x86.cc            |   17
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc         |   15
-rw-r--r--  compiler/optimizing/data_type.h                      |   13
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc        |  240
-rw-r--r--  compiler/optimizing/induction_var_analysis.h         |   13
-rw-r--r--  compiler/optimizing/inliner.cc                       |   76
-rw-r--r--  compiler/optimizing/instruction_builder.cc           |    4
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc        |   16
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm.cc    |    4
-rw-r--r--  compiler/optimizing/instruction_simplifier_arm64.cc  |    4
-rw-r--r--  compiler/optimizing/instruction_simplifier_mips.cc   |    4
-rw-r--r--  compiler/optimizing/intrinsics.cc                    |   61
-rw-r--r--  compiler/optimizing/intrinsics.h                     |    6
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc           |   46
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h      |  136
39 files changed, 581 insertions(+), 859 deletions(-)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 249aaf5632..37a18cb9e9 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -211,6 +211,7 @@ gensrcs {
"driver/compiler_options.h",
"linker/linker_patch.h",
"optimizing/locations.h",
+ "optimizing/optimizing_compiler_stats.h",
"utils/arm/constants_arm.h",
"utils/mips/assembler_mips.h",
@@ -320,7 +321,6 @@ art_cc_test {
"exception_test.cc",
"jni/jni_compiler_test.cc",
"linker/linker_patch_test.cc",
- "linker/method_bss_mapping_encoder_test.cc",
"linker/output_stream_test.cc",
"optimizing/bounds_check_elimination_test.cc",
"optimizing/cloner_test.cc",
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 500fc4ae9a..40a5370ec7 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -174,7 +174,6 @@ void CommonCompilerTest::SetUp() {
}
}
- timer_.reset(new CumulativeLogger("Compilation times"));
CreateCompilerDriver(compiler_kind_, instruction_set);
}
}
@@ -193,9 +192,6 @@ void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind,
GetCompiledClasses(),
GetCompiledMethods(),
number_of_threads,
- /* dump_stats */ true,
- /* dump_passes */ true,
- timer_.get(),
/* swap_fd */ -1,
GetProfileCompilationInfo()));
// We typically don't generate an image in unit tests, disable this optimization by default.
@@ -227,7 +223,6 @@ InstructionSet CommonCompilerTest::GetInstructionSet() const {
}
void CommonCompilerTest::TearDown() {
- timer_.reset();
compiler_driver_.reset();
callbacks_.reset();
verification_results_.reset();
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index bcda41a9b8..05fdc97e07 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -106,7 +106,6 @@ class CommonCompilerTest : public CommonRuntimeTest {
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<VerificationResults> verification_results_;
std::unique_ptr<CompilerDriver> compiler_driver_;
- std::unique_ptr<CumulativeLogger> timer_;
std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index 37c2d32091..d5999941d7 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -35,6 +35,7 @@
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/class.h"
+#include "oat_file.h"
namespace art {
namespace debug {
@@ -49,7 +50,8 @@ static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
std::vector<const char*> names;
if (mi->code_item != nullptr) {
DCHECK(mi->dex_file != nullptr);
- const uint8_t* stream = mi->dex_file->GetDebugInfoStream(mi->code_item);
+ uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*mi->dex_file, mi->code_item);
+ const uint8_t* stream = mi->dex_file->GetDebugInfoStream(debug_info_offset);
if (stream != nullptr) {
DecodeUnsignedLeb128(&stream); // line.
uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
@@ -257,7 +259,9 @@ class ElfCompilationUnitWriter {
// Write local variables.
LocalInfos local_infos;
+ uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*dex, dex_code);
if (dex->DecodeDebugLocalInfo(dex_code,
+ debug_info_offset,
is_static,
mi->dex_method_index,
LocalInfoCallback,
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 6e72b46174..943e03a765 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -26,6 +26,7 @@
#include "debug/src_map_elem.h"
#include "dex_file-inl.h"
#include "linker/elf_builder.h"
+#include "oat_file.h"
#include "stack_map.h"
namespace art {
@@ -158,7 +159,9 @@ class ElfDebugLineWriter {
PositionInfos dex2line_map;
DCHECK(mi->dex_file != nullptr);
const DexFile* dex = mi->dex_file;
- if (!dex->DecodeDebugPositionInfo(mi->code_item, PositionInfoCallback, &dex2line_map)) {
+ uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*dex, mi->code_item);
+ if (!dex->DecodeDebugPositionInfo(
+ mi->code_item, debug_info_offset, PositionInfoCallback, &dex2line_map)) {
continue;
}
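Both ELF debug writers above now obtain the debug-info stream via OatFile::GetDebugInfoOffset() instead of reading the offset straight off the code item; a plausible reading (not stated in this diff) is that the offset recorded in the code item can be redirected, e.g. by vdex quickening, and the OatFile helper is the one place that knows the mapping. The shared shape of the updated call sites, condensed from the hunks above:

    const DexFile* dex = mi->dex_file;
    uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*dex, mi->code_item);
    const uint8_t* stream = dex->GetDebugInfoStream(debug_info_offset);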
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index de481caf07..0769561d0e 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -37,9 +37,6 @@ TEST(CompiledMethodStorage, Deduplicate) {
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
/* thread_count */ 1u,
- /* dump_stats */ false,
- /* dump_passes */ false,
- /* timer */ nullptr,
/* swap_fd */ -1,
/* profile_compilation_info */ nullptr);
CompiledMethodStorage* storage = driver.GetCompiledMethodStorage();
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 726401d09e..0ca3c8f613 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -282,9 +282,6 @@ CompilerDriver::CompilerDriver(
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
size_t thread_count,
- bool dump_stats,
- bool dump_passes,
- CumulativeLogger* timer,
int swap_fd,
const ProfileCompilationInfo* profile_compilation_info)
: compiler_options_(compiler_options),
@@ -303,9 +300,6 @@ CompilerDriver::CompilerDriver(
had_hard_verifier_failure_(false),
parallel_thread_count_(thread_count),
stats_(new AOTCompilationStats),
- dump_stats_(dump_stats),
- dump_passes_(dump_passes),
- timings_logger_(timer),
compiler_context_(nullptr),
support_boot_image_fixup_(true),
compiled_method_storage_(swap_fd),
@@ -396,7 +390,7 @@ void CompilerDriver::CompileAll(jobject class_loader,
if (GetCompilerOptions().IsAnyCompilationEnabled()) {
Compile(class_loader, dex_files, timings);
}
- if (dump_stats_) {
+ if (GetCompilerOptions().GetDumpStats()) {
stats_->Dump();
}
@@ -429,6 +423,10 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
// optimizations that could break that.
max_level = optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
}
+ if (!VdexFile::CanEncodeQuickenedData(dex_file)) {
+ // Don't do any dex level optimizations if we cannot encode the quickening.
+ return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
+ }
if (klass->IsVerified()) {
// Class is verified so we can enable DEX-to-DEX compilation for performance.
return max_level;
@@ -2025,28 +2023,19 @@ class VerifyClassVisitor : public CompilationVisitor {
ClassReference ref(manager_->GetDexFile(), class_def_index);
manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
- // It is *very* problematic if there are verification errors in the boot classpath.
- // For example, we rely on things working OK without verification when the decryption dialog
- // is brought up. So abort in a debug build if we find this violated.
+ // It is *very* problematic if there are resolution errors in the boot classpath.
+ //
+ // It is also bad if classes fail verification. For example, we rely on things working
+ // OK without verification when the decryption dialog is brought up. It is thus highly
+ // recommended to compile the boot classpath with
+ // --abort-on-hard-verifier-error --abort-on-soft-verifier-error
+ // which is the default build system configuration.
if (kIsDebugBuild) {
if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage()) {
- if (!klass->IsVerified()) {
- // Re-run verification to get all failure messages if it soft-failed.
- if (!klass->IsErroneous()) {
- gLogVerbosity.verifier = true;
- // Note: We can't call ClassLinker::VerifyClass, as it will elide the second
- // verification.
- Runtime* runtime = Runtime::Current();
- std::string v_error;
- verifier::MethodVerifier::VerifyClass(soa.Self(),
- klass.Get(),
- runtime->GetCompilerCallbacks(),
- runtime->IsAotCompiler(),
- verifier::HardFailLogMode::kLogInternalFatal,
- &v_error);
- }
+ if (!klass->IsResolved() || klass->IsErroneous()) {
LOG(FATAL) << "Boot classpath class " << klass->PrettyClass()
- << " failed to fully verify: state= " << klass->GetStatus();
+ << " failed to resolve/is erroneous: state= " << klass->GetStatus();
+ UNREACHABLE();
}
}
if (klass->IsVerified()) {
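Two behavior changes accompany the plumbing removal in compiler_driver.cc: stat dumping is now gated on CompilerOptions::GetDumpStats() instead of a constructor flag, and dex-to-dex compilation is refused outright when the vdex cannot encode the quickened data. A condensed sketch of the resulting decision order in GetDexToDexCompilationLevel(), taken from the hunk above (not verbatim):

    if (!VdexFile::CanEncodeQuickenedData(dex_file)) {
      // Quickening could not be recorded in the vdex, so skip dex-level
      // optimizations entirely.
      return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
    }
    if (klass->IsVerified()) {
      return max_level;  // Verified class: dex-to-dex is safe.
    }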
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index da4a580bf2..d2141e8bc7 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -97,9 +97,6 @@ class CompilerDriver {
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
size_t thread_count,
- bool dump_stats,
- bool dump_passes,
- CumulativeLogger* timer,
int swap_fd,
const ProfileCompilationInfo* profile_compilation_info);
@@ -302,18 +299,6 @@ class CompilerDriver {
return parallel_thread_count_;
}
- bool GetDumpStats() const {
- return dump_stats_;
- }
-
- bool GetDumpPasses() const {
- return dump_passes_;
- }
-
- CumulativeLogger* GetTimingsLogger() const {
- return timings_logger_;
- }
-
void SetDedupeEnabled(bool dedupe_enabled) {
compiled_method_storage_.SetDedupeEnabled(dedupe_enabled);
}
@@ -536,11 +521,6 @@ class CompilerDriver {
class AOTCompilationStats;
std::unique_ptr<AOTCompilationStats> stats_;
- bool dump_stats_;
- const bool dump_passes_;
-
- CumulativeLogger* const timings_logger_;
-
typedef void (*CompilerCallbackFn)(CompilerDriver& driver);
typedef MutexLock* (*CompilerMutexLockFn)(CompilerDriver& driver);
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 032763cdff..c0a9a05aa6 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -49,6 +49,8 @@ CompilerOptions::CompilerOptions()
implicit_so_checks_(true),
implicit_suspend_checks_(false),
compile_pic_(false),
+ dump_timings_(false),
+ dump_stats_(false),
verbose_methods_(),
abort_on_hard_verifier_failure_(false),
abort_on_soft_verifier_failure_(false),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index a71f61a9e3..3f660293d2 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -266,6 +266,14 @@ class CompilerOptions FINAL {
return passes_to_run_;
}
+ bool GetDumpTimings() const {
+ return dump_timings_;
+ }
+
+ bool GetDumpStats() const {
+ return dump_stats_;
+ }
+
private:
bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
@@ -303,6 +311,8 @@ class CompilerOptions FINAL {
bool implicit_so_checks_;
bool implicit_suspend_checks_;
bool compile_pic_;
+ bool dump_timings_;
+ bool dump_stats_;
// Vector of methods to have verbose output enabled for.
std::vector<std::string> verbose_methods_;
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index e28d49974a..f97ab08600 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -78,6 +78,14 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string
map.AssignIfExists(Base::VerboseMethods, &options->verbose_methods_);
options->deduplicate_code_ = map.GetOrDefault(Base::DeduplicateCode);
+ if (map.Exists(Base::DumpTimings)) {
+ options->dump_timings_ = true;
+ }
+
+ if (map.Exists(Base::DumpStats)) {
+ options->dump_stats_ = true;
+ }
+
return true;
}
@@ -129,6 +137,12 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.WithValueMap({{"false", false}, {"true", true}})
.IntoKey(Map::DeduplicateCode)
+ .Define({"--dump-timings"})
+ .IntoKey(Map::DumpTimings)
+
+ .Define({"--dump-stats"})
+ .IntoKey(Map::DumpStats)
+
.Define("--debuggable")
.IntoKey(Map::Debuggable)
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index cccd6184c6..2c56fd7974 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -58,5 +58,7 @@ COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy)
COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods)
COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true)
+COMPILER_OPTIONS_KEY (Unit, DumpTimings)
+COMPILER_OPTIONS_KEY (Unit, DumpStats)
#undef COMPILER_OPTIONS_KEY
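Taken together, the compiler_options* changes surface two new presence-only (Unit) flags, --dump-timings and --dump-stats, and land them in CompilerOptions, where CompilerDriver::CompileAll() already consults GetDumpStats() above. A sketch of the matching timings side, assuming a front end such as dex2oat owns the TimingLogger now that CompilerDriver no longer carries a CumulativeLogger (the surrounding names here are assumptions, not part of this diff):

    TimingLogger timings("compile", /* precise */ false, /* verbose */ false);
    driver->CompileAll(class_loader, dex_files, &timings);
    if (compiler_options->GetDumpTimings()) {
      LOG(INFO) << Dumpable<TimingLogger>(timings);  // assumed dump path
    }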
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 0c82d601a7..f33c5e1b97 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -130,7 +130,6 @@ JitCompiler::JitCompiler() {
if (instruction_set_features_ == nullptr) {
instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
}
- cumulative_logger_.reset(new CumulativeLogger("jit times"));
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(),
/* verification_results */ nullptr,
@@ -141,9 +140,6 @@ JitCompiler::JitCompiler() {
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
/* thread_count */ 1,
- /* dump_stats */ false,
- /* dump_passes */ false,
- cumulative_logger_.get(),
/* swap_fd */ -1,
/* profile_compilation_info */ nullptr));
// Disable dedupe so we can remove compiled methods.
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 1e1838efd5..31dc9e2fe5 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -48,7 +48,6 @@ class JitCompiler {
private:
std::unique_ptr<CompilerOptions> compiler_options_;
- std::unique_ptr<CumulativeLogger> cumulative_logger_;
std::unique_ptr<CompilerDriver> compiler_driver_;
std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
std::unique_ptr<JitLogger> jit_logger_;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index b93b05cbd4..37f7d632ca 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -29,7 +29,6 @@
#include "base/macros.h"
#include "calling_convention.h"
#include "class_linker.h"
-#include "compiled_method.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
@@ -115,10 +114,10 @@ static ThreadOffset<kPointerSize> GetJniEntrypointThreadOffset(JniEntrypoint whi
// convention.
//
template <PointerSize kPointerSize>
-static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
- uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) {
+static JniCompiledMethod ArtJniCompileMethodInternal(CompilerDriver* driver,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) {
const bool is_native = (access_flags & kAccNative) != 0;
CHECK(is_native);
const bool is_static = (access_flags & kAccStatic) != 0;
@@ -657,16 +656,12 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
MemoryRegion code(&managed_code[0], managed_code.size());
__ FinalizeInstructions(code);
- return CompiledMethod::SwapAllocCompiledMethod(driver,
- instruction_set,
- ArrayRef<const uint8_t>(managed_code),
- frame_size,
- main_jni_conv->CoreSpillMask(),
- main_jni_conv->FpSpillMask(),
- /* method_info */ ArrayRef<const uint8_t>(),
- /* vmap_table */ ArrayRef<const uint8_t>(),
- ArrayRef<const uint8_t>(*jni_asm->cfi().data()),
- ArrayRef<const linker::LinkerPatch>());
+ return JniCompiledMethod(instruction_set,
+ std::move(managed_code),
+ frame_size,
+ main_jni_conv->CoreSpillMask(),
+ main_jni_conv->FpSpillMask(),
+ ArrayRef<const uint8_t>(*jni_asm->cfi().data()));
}
// Copy a single parameter from the managed to the JNI calling convention.
@@ -775,10 +770,10 @@ static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
}
}
-CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler,
- uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) {
+JniCompiledMethod ArtQuickJniCompileMethod(CompilerDriver* compiler,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) {
if (Is64BitInstructionSet(compiler->GetInstructionSet())) {
return ArtJniCompileMethodInternal<PointerSize::k64>(
compiler, access_flags, method_idx, dex_file);
diff --git a/compiler/jni/quick/jni_compiler.h b/compiler/jni/quick/jni_compiler.h
index 3fcce55b5a..11419947a0 100644
--- a/compiler/jni/quick/jni_compiler.h
+++ b/compiler/jni/quick/jni_compiler.h
@@ -17,18 +17,55 @@
#ifndef ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
#define ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
-#include "compiler.h"
-#include "dex_file.h"
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "base/array_ref.h"
namespace art {
+class ArtMethod;
class CompilerDriver;
-class CompiledMethod;
+class DexFile;
+
+class JniCompiledMethod {
+ public:
+ JniCompiledMethod(InstructionSet instruction_set,
+ std::vector<uint8_t>&& code,
+ uint32_t frame_size,
+ uint32_t core_spill_mask,
+ uint32_t fp_spill_mask,
+ ArrayRef<const uint8_t> cfi)
+ : instruction_set_(instruction_set),
+ code_(std::move(code)),
+ frame_size_(frame_size),
+ core_spill_mask_(core_spill_mask),
+ fp_spill_mask_(fp_spill_mask),
+ cfi_(cfi.begin(), cfi.end()) {}
+
+ JniCompiledMethod(JniCompiledMethod&& other) = default;
+ ~JniCompiledMethod() = default;
+
+ InstructionSet GetInstructionSet() const { return instruction_set_; }
+ ArrayRef<const uint8_t> GetCode() const { return ArrayRef<const uint8_t>(code_); }
+ uint32_t GetFrameSize() const { return frame_size_; }
+ uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
+ uint32_t GetFpSpillMask() const { return fp_spill_mask_; }
+ ArrayRef<const uint8_t> GetCfi() const { return ArrayRef<const uint8_t>(cfi_); }
+
+ private:
+ InstructionSet instruction_set_;
+ std::vector<uint8_t> code_;
+ uint32_t frame_size_;
+ uint32_t core_spill_mask_;
+ uint32_t fp_spill_mask_;
+ std::vector<uint8_t> cfi_;
+};
-CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler,
- uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file);
+JniCompiledMethod ArtQuickJniCompileMethod(CompilerDriver* compiler,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file);
} // namespace art
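JniCompiledMethod carries only what the JNI stub compiler produces; deciding on swap-space allocation and deduplication becomes the caller's job. A sketch of what a caller might now do, using the SwapAllocCompiledMethod() signature visible in the removed jni_compiler.cc code above (the caller itself is an assumption, not shown in this diff):

    JniCompiledMethod jni_method = ArtQuickJniCompileMethod(
        driver, access_flags, method_idx, dex_file);
    CompiledMethod* compiled = CompiledMethod::SwapAllocCompiledMethod(
        driver,
        jni_method.GetInstructionSet(),
        jni_method.GetCode(),
        jni_method.GetFrameSize(),
        jni_method.GetCoreSpillMask(),
        jni_method.GetFpSpillMask(),
        /* method_info */ ArrayRef<const uint8_t>(),
        /* vmap_table */ ArrayRef<const uint8_t>(),
        jni_method.GetCfi(),
        /* patches */ ArrayRef<const linker::LinkerPatch>());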
diff --git a/compiler/linker/method_bss_mapping_encoder.h b/compiler/linker/method_bss_mapping_encoder.h
deleted file mode 100644
index b2922ec6d2..0000000000
--- a/compiler/linker/method_bss_mapping_encoder.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
-#define ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
-
-#include "base/enums.h"
-#include "base/logging.h"
-#include "dex_file.h"
-#include "method_bss_mapping.h"
-
-namespace art {
-namespace linker {
-
-// Helper class for encoding compressed MethodBssMapping.
-class MethodBssMappingEncoder {
- public:
- explicit MethodBssMappingEncoder(PointerSize pointer_size)
- : pointer_size_(static_cast<size_t>(pointer_size)) {
- entry_.method_index = DexFile::kDexNoIndex16;
- entry_.index_mask = 0u;
- entry_.bss_offset = static_cast<uint32_t>(-1);
- }
-
- // Try to merge the next method_index -> bss_offset mapping into the current entry.
- // Return true on success, false on failure.
- bool TryMerge(uint32_t method_index, uint32_t bss_offset) {
- DCHECK_NE(method_index, entry_.method_index);
- if (entry_.bss_offset + pointer_size_ != bss_offset) {
- return false;
- }
- uint32_t diff = method_index - entry_.method_index;
- if (diff > 16u) {
- return false;
- }
- if ((entry_.index_mask & ~(static_cast<uint32_t>(-1) << diff)) != 0u) {
- return false;
- }
- entry_.method_index = method_index;
- // Insert the bit indicating the method index we've just overwritten
- // and shift bits indicating method indexes before that.
- entry_.index_mask = dchecked_integral_cast<uint16_t>(
- (static_cast<uint32_t>(entry_.index_mask) | 0x10000u) >> diff);
- entry_.bss_offset = bss_offset;
- return true;
- }
-
- void Reset(uint32_t method_index, uint32_t bss_offset) {
- entry_.method_index = method_index;
- entry_.index_mask = 0u;
- entry_.bss_offset = bss_offset;
- }
-
- MethodBssMappingEntry GetEntry() {
- return entry_;
- }
-
- private:
- size_t pointer_size_;
- MethodBssMappingEntry entry_;
-};
-
-} // namespace linker
-} // namespace art
-
-#endif // ART_COMPILER_LINKER_METHOD_BSS_MAPPING_ENCODER_H_
diff --git a/compiler/linker/method_bss_mapping_encoder_test.cc b/compiler/linker/method_bss_mapping_encoder_test.cc
deleted file mode 100644
index 1240389bef..0000000000
--- a/compiler/linker/method_bss_mapping_encoder_test.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "method_bss_mapping_encoder.h"
-
-#include "gtest/gtest.h"
-
-namespace art {
-namespace linker {
-
-TEST(MethodBssMappingEncoder, TryMerge) {
- for (PointerSize pointer_size : {PointerSize::k32, PointerSize::k64}) {
- size_t raw_pointer_size = static_cast<size_t>(pointer_size);
- MethodBssMappingEncoder encoder(pointer_size);
- encoder.Reset(1u, 0u);
- ASSERT_FALSE(encoder.TryMerge(5u, raw_pointer_size + 1)); // Wrong bss_offset difference.
- ASSERT_FALSE(encoder.TryMerge(18u, raw_pointer_size)); // Method index out of range.
- ASSERT_TRUE(encoder.TryMerge(5u, raw_pointer_size));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(1u));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(5u));
- ASSERT_FALSE(encoder.GetEntry().CoversIndex(17u));
- ASSERT_FALSE(encoder.TryMerge(17u, 2 * raw_pointer_size + 1)); // Wrong bss_offset difference.
- ASSERT_FALSE(encoder.TryMerge(18u, 2 * raw_pointer_size)); // Method index out of range.
- ASSERT_TRUE(encoder.TryMerge(17u, 2 * raw_pointer_size));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(1u));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(5u));
- ASSERT_TRUE(encoder.GetEntry().CoversIndex(17u));
- ASSERT_EQ(0u, encoder.GetEntry().GetBssOffset(1u, raw_pointer_size));
- ASSERT_EQ(raw_pointer_size, encoder.GetEntry().GetBssOffset(5u, raw_pointer_size));
- ASSERT_EQ(2 * raw_pointer_size, encoder.GetEntry().GetBssOffset(17u, raw_pointer_size));
- ASSERT_EQ(0x0011u, encoder.GetEntry().index_mask);
- ASSERT_FALSE(encoder.TryMerge(18u, 2 * raw_pointer_size)); // Method index out of range.
- }
-}
-
-} // namespace linker
-} // namespace art
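For reference, the deleted encoder packed a run of method_index -> bss_offset pairs into a single MethodBssMappingEntry: method_index holds the last index of the run, bss_offset its slot, and index_mask marks earlier members, with bit p standing for index method_index - (16 - p). Walking the deleted test: Reset(1, 0), then TryMerge(5, ptr_size), then TryMerge(17, 2 * ptr_size) ends with method_index == 17 and index_mask == 0x0011, whose set bits 4 and 0 encode indexes 5 and 1, matching the CoversIndex() and GetBssOffset() assertions above.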
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 6297dd0481..9e9d14af9e 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -52,9 +52,6 @@ class RelativePatcherTest : public testing::Test {
/* compiled_classes */ nullptr,
/* compiled_methods */ nullptr,
/* thread_count */ 1u,
- /* dump_stats */ false,
- /* dump_passes */ false,
- /* timer */ nullptr,
/* swap_fd */ -1,
/* profile_compilation_info */ nullptr),
error_msg_(),
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0bd3ce937a..aff6f9f64f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1411,10 +1411,10 @@ LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* in
void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
if (compiler_options_.GetImplicitNullChecks()) {
- MaybeRecordStat(stats_, kImplicitNullCheckGenerated);
+ MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
GenerateImplicitNullCheck(instruction);
} else {
- MaybeRecordStat(stats_, kExplicitNullCheckGenerated);
+ MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
GenerateExplicitNullCheck(instruction);
}
}
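These two call sites change only because MethodCompilationStat apparently becomes a scoped enum in optimizing_compiler_stats.h (a 136-line change per the diffstat, and newly added to the gensrcs list in Android.bp, but not excerpted here), so its enumerators need qualification. A sketch of the helper shape the calls assume, hedged since the header is not shown:

    inline void MaybeRecordStat(OptimizingCompilerStats* stats,
                                MethodCompilationStat stat) {
      if (stats != nullptr) {  // Stats are optional, hence "Maybe".
        stats->RecordStat(stat);
      }
    }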
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a0cb43ee01..5054a299d3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -311,40 +311,23 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
LoadClassSlowPathARM64(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- vixl::aarch64::Register bss_entry_temp = vixl::aarch64::Register(),
- vixl::aarch64::Label* bss_entry_adrp_label = nullptr)
+ bool do_clinit)
: SlowPathCodeARM64(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_entry_temp_(bss_entry_temp),
- bss_entry_adrp_label_(bss_entry_adrp_label) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
- constexpr bool call_saves_everything_except_r0_ip0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- // For HLoadClass/kBssEntry/kSaveEverything, the page address of the entry is in a temp
- // register, make sure it's not clobbered by the call or by saving/restoring registers.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
- if (is_load_class_bss_entry) {
- DCHECK(bss_entry_temp_.IsValid());
- DCHECK(!bss_entry_temp_.Is(calling_convention.GetRegisterAt(0)));
- DCHECK(
- !UseScratchRegisterScope(arm64_codegen->GetVIXLAssembler()).IsAvailable(bss_entry_temp_));
- }
-
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -363,26 +346,6 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- if (is_load_class_bss_entry) {
- DCHECK(out.IsValid());
- const DexFile& dex_file = cls_->GetDexFile();
- if (call_saves_everything_except_r0_ip0) {
- // The class entry page address was preserved in bss_entry_temp_ thanks to kSaveEverything.
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the class entry page.
- bss_entry_adrp_label_ = arm64_codegen->NewBssEntryTypePatch(dex_file, type_index);
- arm64_codegen->EmitAdrpPlaceholder(bss_entry_adrp_label_, bss_entry_temp_);
- }
- vixl::aarch64::Label* strp_label =
- arm64_codegen->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label_);
- {
- SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
- __ Bind(strp_label);
- __ str(RegisterFrom(locations->Out(), DataType::Type::kReference),
- MemOperand(bss_entry_temp_, /* offset placeholder */ 0));
- }
- }
__ B(GetExitLabel());
}
@@ -398,34 +361,23 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
// Whether to initialize the class.
const bool do_clinit_;
- // For HLoadClass/kBssEntry, the temp register and the label of the ADRP where it was loaded.
- vixl::aarch64::Register bss_entry_temp_;
- vixl::aarch64::Label* bss_entry_adrp_label_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};
class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
public:
- LoadStringSlowPathARM64(HLoadString* instruction, Register temp, vixl::aarch64::Label* adrp_label)
- : SlowPathCodeARM64(instruction),
- temp_(temp),
- adrp_label_(adrp_label) {}
+ explicit LoadStringSlowPathARM64(HLoadString* instruction)
+ : SlowPathCodeARM64(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- // Make sure `temp_` is not clobbered by the call or by saving/restoring registers.
- DCHECK(temp_.IsValid());
- DCHECK(!temp_.Is(calling_convention.GetRegisterAt(0)));
- DCHECK(!UseScratchRegisterScope(arm64_codegen->GetVIXLAssembler()).IsAvailable(temp_));
-
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
__ Mov(calling_convention.GetRegisterAt(0).W(), string_index.index_);
arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
@@ -435,33 +387,12 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- const DexFile& dex_file = instruction_->AsLoadString()->GetDexFile();
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
- // The string entry page address was preserved in temp_ thanks to kSaveEverything.
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry page.
- adrp_label_ = arm64_codegen->NewStringBssEntryPatch(dex_file, string_index);
- arm64_codegen->EmitAdrpPlaceholder(adrp_label_, temp_);
- }
- vixl::aarch64::Label* strp_label =
- arm64_codegen->NewStringBssEntryPatch(dex_file, string_index, adrp_label_);
- {
- SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
- __ Bind(strp_label);
- __ str(RegisterFrom(locations->Out(), DataType::Type::kReference),
- MemOperand(temp_, /* offset placeholder */ 0));
- }
-
__ B(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
private:
- const Register temp_;
- vixl::aarch64::Label* adrp_label_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};
@@ -4883,7 +4814,6 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- locations->AddTemp(FixedTempLocation());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
@@ -4910,8 +4840,6 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
- Register bss_entry_temp;
- vixl::aarch64::Label* bss_entry_adrp_label = nullptr;
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
@@ -4975,16 +4903,16 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
// Add ADRP with its PC-relative Class .bss entry patch.
const DexFile& dex_file = cls->GetDexFile();
dex::TypeIndex type_index = cls->GetTypeIndex();
- bss_entry_temp = XRegisterFrom(cls->GetLocations()->GetTemp(0));
- bss_entry_adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
- codegen_->EmitAdrpPlaceholder(bss_entry_adrp_label, bss_entry_temp);
+ vixl::aarch64::Register temp = XRegisterFrom(out_loc);
+ vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
+ codegen_->EmitAdrpPlaceholder(adrp_label, temp);
// Add LDR with its PC-relative Class patch.
vixl::aarch64::Label* ldr_label =
- codegen_->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label);
+ codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(cls,
out_loc,
- bss_entry_temp,
+ temp,
/* offset placeholder */ 0u,
ldr_label,
read_barrier_option);
@@ -5013,7 +4941,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
- cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
+ cls, cls, cls->GetDexPc(), do_clinit);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Cbz(out, slow_path->GetEntryLabel());
@@ -5078,7 +5006,6 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- locations->AddTemp(FixedTempLocation());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
@@ -5138,7 +5065,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
const DexFile& dex_file = load->GetDexFile();
const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- Register temp = XRegisterFrom(load->GetLocations()->GetTemp(0));
+ Register temp = XRegisterFrom(out_loc);
vixl::aarch64::Label* adrp_label = codegen_->NewStringBssEntryPatch(dex_file, string_index);
codegen_->EmitAdrpPlaceholder(adrp_label, temp);
// Add LDR with its .bss entry String patch.
@@ -5152,7 +5079,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
ldr_label,
kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load);
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
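The arm64 pattern above repeats in the ARM/VIXL and MIPS back ends below: the HLoadClass/kBssEntry and HLoadString/kBssEntry slow paths no longer write the resolved object back to the .bss slot, and the fixed temp plus ADRP-label plumbing that kept the slot address alive across the runtime call is removed, with the fast path reusing the output register as the address temp. Since the slot must still be filled for the Cbz fast path ever to succeed, the store presumably moves to the runtime side of kQuickInitializeType/kQuickResolveString; that is an inference from this diff, not something it states.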
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 9e7455d488..3f8f0c44f3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -532,29 +532,12 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
- constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
- vixl32::Register entry_address;
- if (is_load_class_bss_entry && call_saves_everything_except_r0) {
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call.
- bool temp_is_r0 = temp.Is(calling_convention.GetRegisterAt(0));
- entry_address = temp_is_r0 ? RegisterFrom(out) : temp;
- DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
- if (temp_is_r0) {
- __ Mov(entry_address, temp);
- }
- }
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -566,22 +549,6 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- if (is_load_class_bss_entry) {
- if (call_saves_everything_except_r0) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- __ Str(r0, MemOperand(entry_address));
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry.
- UseScratchRegisterScope temps(
- down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
- __ Str(r0, MemOperand(temp));
- }
- }
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -616,48 +583,17 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- vixl32::Register out = OutputRegister(load);
- constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call.
- vixl32::Register entry_address;
- if (call_saves_everything_except_r0) {
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- bool temp_is_r0 = (temp.Is(calling_convention.GetRegisterAt(0)));
- entry_address = temp_is_r0 ? out : temp;
- DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
- if (temp_is_r0) {
- __ Mov(entry_address, temp);
- }
- }
-
__ Mov(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved String to the .bss entry.
- if (call_saves_everything_except_r0) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- __ Str(r0, MemOperand(entry_address));
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry.
- UseScratchRegisterScope temps(
- down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- arm_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
- __ Str(r0, MemOperand(temp));
- }
-
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
RestoreLiveRegisters(codegen, locations);
@@ -7104,9 +7040,6 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Note that IP may be clobbered by saving/restoring the live register (only one thanks
- // to the custom calling convention) or by marking, so we request a different temp.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConventionARMVIXL calling_convention;
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -7189,13 +7122,10 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
- ? RegisterFrom(locations->GetTemp(0))
- : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitMovwMovtPlaceholder(labels, temp);
- GenerateGcRootFieldLoad(cls, out_loc, temp, /* offset */ 0, read_barrier_option);
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -7296,9 +7226,6 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need, including temps.
- // Note that IP may be clobbered by saving/restoring the live register (only one thanks
- // to the custom calling convention) or by marking, so we request a different temp.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConventionARMVIXL calling_convention;
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -7348,13 +7275,10 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
}
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
- ? RegisterFrom(locations->GetTemp(0))
- : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitMovwMovtPlaceholder(labels, temp);
- GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ddec0cc453..d6922d2f3f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -220,13 +220,11 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
LoadClassSlowPathMIPS(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr)
+ bool do_clinit)
: SlowPathCodeMIPS(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_info_high_(bss_info_high) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
@@ -234,28 +232,11 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- const bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- Register entry_address = kNoRegister;
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out.AsRegister<Register>() : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -267,18 +248,6 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, bss_info_high_);
- __ Sw(calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678,
- &info_low->label);
- }
-
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -289,21 +258,6 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && !baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the class entry.
- const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
- const bool has_irreducible_loops = codegen->GetGraph()->HasIrreducibleLoops();
- Register base =
- (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
- CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, info_high);
- mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base);
- __ Sw(out.AsRegister<Register>(), TMP, /* placeholder */ 0x5678, &info_low->label);
- }
__ B(GetExitLabel());
}
@@ -319,92 +273,41 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
// Whether to initialize the class.
const bool do_clinit_;
- // Pointer to the high half PC-relative patch info for HLoadClass/kBssEntry.
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
};
class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
public:
- explicit LoadStringSlowPathMIPS(HLoadString* instruction,
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high)
- : SlowPathCodeMIPS(instruction), bss_info_high_(bss_info_high) {}
+ explicit LoadStringSlowPathMIPS(HLoadString* instruction)
+ : SlowPathCodeMIPS(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- Register out = locations->Out().AsRegister<Register>();
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadString/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- Register entry_address = kNoRegister;
- if (baker_or_no_read_barriers) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved string to the BSS entry.
- if (baker_or_no_read_barriers) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, bss_info_high_);
- __ Sw(calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678,
- &info_low->label);
- }
-
DataType::Type type = instruction_->GetType();
mips_codegen->MoveLocation(locations->Out(),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
type);
RestoreLiveRegisters(codegen, locations);
- // Store the resolved string to the BSS entry.
- if (!baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the string entry.
- const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
- const bool has_irreducible_loops = codegen->GetGraph()->HasIrreducibleLoops();
- Register base =
- (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
- CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, info_high);
- mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base);
- __ Sw(out, TMP, /* placeholder */ 0x5678, &info_low->label);
- }
__ B(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
private:
- // Pointer to the high half PC-relative patch info.
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
};
@@ -7736,8 +7639,6 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -7786,7 +7687,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
@@ -7845,17 +7745,16 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- bss_info_high = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high,
- temp,
+ out,
base_or_current_method_reg);
GenerateGcRootFieldLoad(cls,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
read_barrier_option,
&info_low->label);
@@ -7887,7 +7786,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqz(out, slow_path->GetEntryLabel());
@@ -7960,8 +7859,6 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -8041,19 +7938,17 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
- temp,
+ out,
base_or_current_method_reg);
GenerateGcRootFieldLoad(load,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 0a6d9159d1..ee33b3f335 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -175,13 +175,11 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
LoadClassSlowPathMIPS64(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high = nullptr)
+ bool do_clinit)
: SlowPathCodeMIPS64(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_info_high_(bss_info_high) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
@@ -189,28 +187,11 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- const bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- GpuRegister entry_address = kNoGpuRegister;
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out.AsRegister<GpuRegister>() : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -222,19 +203,6 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, bss_info_high_);
- __ Bind(&info_low->label);
- __ StoreToOffset(kStoreWord,
- calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678);
- }
-
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -245,17 +213,6 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && !baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the class entry.
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, info_high);
- mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, info_low);
- __ StoreToOffset(kStoreWord, out.AsRegister<GpuRegister>(), TMP, /* placeholder */ 0x5678);
- }
__ Bc(GetExitLabel());
}
@@ -271,46 +228,25 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
// Whether to initialize the class.
const bool do_clinit_;
- // Pointer to the high half PC-relative patch info for HLoadClass/kBssEntry.
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};
class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- explicit LoadStringSlowPathMIPS64(HLoadString* instruction,
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high)
- : SlowPathCodeMIPS64(instruction), bss_info_high_(bss_info_high) {}
+ explicit LoadStringSlowPathMIPS64(HLoadString* instruction)
+ : SlowPathCodeMIPS64(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadString/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- GpuRegister entry_address = kNoGpuRegister;
- if (baker_or_no_read_barriers) {
- GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
@@ -318,47 +254,18 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved string to the BSS entry.
- if (baker_or_no_read_barriers) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(),
- string_index,
- bss_info_high_);
- __ Bind(&info_low->label);
- __ StoreToOffset(kStoreWord,
- calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678);
- }
-
DataType::Type type = instruction_->GetType();
mips64_codegen->MoveLocation(locations->Out(),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
type);
RestoreLiveRegisters(codegen, locations);
- // Store the resolved string to the BSS entry.
- if (!baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the string entry.
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, info_high);
- mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, info_low);
- __ StoreToOffset(kStoreWord, out, TMP, /* placeholder */ 0x5678);
- }
__ Bc(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
private:
- // Pointer to the high half PC-relative patch info.
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};
@@ -5979,8 +5886,6 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6014,7 +5919,6 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high = nullptr;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
DCHECK(!cls->CanCallRuntime());
@@ -6064,17 +5968,14 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- bss_info_high = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- GpuRegister temp = non_baker_read_barrier
- ? out
- : locations->GetTemp(0).AsRegister<GpuRegister>();
- codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, temp);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, out);
GenerateGcRootFieldLoad(cls,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
read_barrier_option,
&info_low->label);
@@ -6098,7 +5999,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqzc(out, slow_path->GetEntryLabel());
@@ -6146,8 +6047,6 @@ void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6203,19 +6102,15 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- GpuRegister temp = non_baker_read_barrier
- ? out
- : locations->GetTemp(0).AsRegister<GpuRegister>();
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, temp);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, out);
GenerateGcRootFieldLoad(load,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ad0e71aaf4..2e8170ecc4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -240,13 +240,6 @@ class LoadStringSlowPathX86 : public SlowPathCode {
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- Register method_address = locations->InAt(0).AsRegister<Register>();
- __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
- locations->Out().AsRegister<Register>());
- Label* fixup_label = x86_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
- __ Bind(fixup_label);
-
__ jmp(GetExitLabel());
}
@@ -293,16 +286,6 @@ class LoadClassSlowPathX86 : public SlowPathCode {
x86_codegen->Move32(out, Location::RegisterLocation(EAX));
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- Register method_address = locations->InAt(0).AsRegister<Register>();
- __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
- locations->Out().AsRegister<Register>());
- Label* fixup_label = x86_codegen->NewTypeBssEntryPatch(cls_);
- __ Bind(fixup_label);
- }
__ jmp(GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d64a49704e..e25688c9a3 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -273,15 +273,6 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
- locations->Out().AsRegister<CpuRegister>());
- Label* fixup_label = x86_64_codegen->NewTypeBssEntryPatch(cls_);
- __ Bind(fixup_label);
- }
__ jmp(GetExitLabel());
}
@@ -323,12 +314,6 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
- locations->Out().AsRegister<CpuRegister>());
- Label* fixup_label = x86_64_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
- __ Bind(fixup_label);
-
__ jmp(GetExitLabel());
}
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 75a7fbe6ca..d253036479 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -186,6 +186,7 @@ class DataType {
}
static bool IsTypeConversionImplicit(Type input_type, Type result_type);
+ static bool IsTypeConversionImplicit(int64_t value, Type result_type);
static const char* PrettyDescriptor(Type type);
@@ -213,6 +214,18 @@ inline bool DataType::IsTypeConversionImplicit(Type input_type, Type result_type
MaxValueOfIntegralType(input_type) <= MaxValueOfIntegralType(result_type));
}
+inline bool DataType::IsTypeConversionImplicit(int64_t value, Type result_type) {
+ if (IsIntegralType(result_type) && result_type != Type::kInt64) {
+ // If the constant value falls in the range of the result_type, type
+ // conversion isn't needed.
+ return value >= MinValueOfIntegralType(result_type) &&
+ value <= MaxValueOfIntegralType(result_type);
+ }
+  // Conversion isn't implicit if it's into a non-integral type, or into
+  // 64-bit int, which may use a different number of registers.
+ return false;
+}
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_DATA_TYPE_H_
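For intuition, a short sketch of how the new IsTypeConversionImplicit(int64_t, Type) overload behaves. The call sites and constants below are illustrative, not taken from this patch; the ranges assume the usual two's-complement integral bounds (kInt8: [-128, 127], kUint16: [0, 65535]).

    // Conversions whose target range already contains the constant are implicit.
    DataType::IsTypeConversionImplicit(100, DataType::Type::kInt8);      // true: 100 fits in int8.
    DataType::IsTypeConversionImplicit(200, DataType::Type::kInt8);      // false: 200 > 127.
    DataType::IsTypeConversionImplicit(65535, DataType::Type::kUint16);  // true: fits exactly.
    DataType::IsTypeConversionImplicit(0, DataType::Type::kInt64);       // false: 64-bit targets are excluded.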
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index ad29ba56ab..d270c6a28e 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -93,6 +93,136 @@ static DataType::Type ImplicitConversion(DataType::Type type) {
}
}
+/**
+ * Returns true if the loop is guarded by "a cmp b" on entry.
+ */
+static bool IsGuardedBy(HLoopInformation* loop,
+ IfCondition cmp,
+ HInstruction* a,
+ HInstruction* b) {
+ // Chase back through straightline code to the first potential
+ // block that has a control dependence.
+ // guard: if (x) bypass
+ // |
+ // entry: straightline code
+ // |
+ // preheader
+ // |
+ // header
+ HBasicBlock* guard = loop->GetPreHeader();
+ HBasicBlock* entry = loop->GetHeader();
+ while (guard->GetPredecessors().size() == 1 &&
+ guard->GetSuccessors().size() == 1) {
+ entry = guard;
+ guard = guard->GetSinglePredecessor();
+ }
+ // Find guard.
+ HInstruction* control = guard->GetLastInstruction();
+ if (!control->IsIf()) {
+ return false;
+ }
+ HIf* ifs = control->AsIf();
+ HInstruction* if_expr = ifs->InputAt(0);
+ if (if_expr->IsCondition()) {
+ IfCondition other_cmp = ifs->IfTrueSuccessor() == entry
+ ? if_expr->AsCondition()->GetCondition()
+ : if_expr->AsCondition()->GetOppositeCondition();
+ if (if_expr->InputAt(0) == a && if_expr->InputAt(1) == b) {
+ return cmp == other_cmp;
+ } else if (if_expr->InputAt(1) == a && if_expr->InputAt(0) == b) {
+ switch (cmp) {
+ case kCondLT: return other_cmp == kCondGT;
+ case kCondLE: return other_cmp == kCondGE;
+ case kCondGT: return other_cmp == kCondLT;
+ case kCondGE: return other_cmp == kCondLE;
+ default: LOG(FATAL) << "unexpected cmp: " << cmp;
+ }
+ }
+ }
+ return false;
+}
+
+/** Finds the first loop header phi use of the given instruction. */
+HInstruction* FindFirstLoopHeaderPhiUse(HLoopInformation* loop, HInstruction* instruction) {
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ if (use.GetUser()->GetBlock() == loop->GetHeader() &&
+ use.GetUser()->IsPhi() &&
+ use.GetUser()->InputAt(1) == instruction) {
+ return use.GetUser();
+ }
+ }
+ return nullptr;
+}
+
+/**
+ * Tests and, when `rewrite` is set, fixes uses of the instruction outside the
+ * loop for break-loop rewriting. Returns false if an outside use exists that
+ * cannot be replaced.
+ */
+bool FixOutsideUse(HLoopInformation* loop,
+ HInstruction* instruction,
+ HInstruction* replacement,
+ bool rewrite) {
+ // Deal with regular uses.
+ const HUseList<HInstruction*>& uses = instruction->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end; ) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment prior to potential removal
+ if (user->GetBlock()->GetLoopInformation() != loop) {
+ if (replacement == nullptr) {
+ return false;
+ } else if (rewrite) {
+ user->ReplaceInput(replacement, index);
+ }
+ }
+ }
+ // Deal with environment uses.
+ const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
+ for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
+ HEnvironment* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment prior to potential removal
+ if (user->GetHolder()->GetBlock()->GetLoopInformation() != loop) {
+ if (replacement == nullptr) {
+ return false;
+ } else if (rewrite) {
+ user->RemoveAsUserOfInput(index);
+ user->SetRawEnvAt(index, replacement);
+ replacement->AddEnvUseAt(user, index);
+ }
+ }
+ }
+ return true;
+}
+
+/**
+ * Tests and rewrites the loop body of a break-loop. Returns true on success.
+ */
+bool RewriteBreakLoopBody(HLoopInformation* loop,
+ HBasicBlock* body,
+ HInstruction* cond,
+ HInstruction* index,
+ HInstruction* upper,
+ bool rewrite) {
+ // Deal with Phis. Outside use prohibited, except for index (which gets exit value).
+ for (HInstructionIterator it(loop->GetHeader()->GetPhis()); !it.Done(); it.Advance()) {
+ HInstruction* exit_value = it.Current() == index ? upper : nullptr;
+ if (!FixOutsideUse(loop, it.Current(), exit_value, rewrite)) {
+ return false;
+ }
+ }
+ // Deal with other statements in header.
+ for (HInstruction* m = cond->GetPrevious(), *p = nullptr; m && !m->IsSuspendCheck(); m = p) {
+ p = m->GetPrevious();
+ if (rewrite) {
+ m->MoveBefore(body->GetFirstInstruction(), false);
+ }
+ if (!FixOutsideUse(loop, m, FindFirstLoopHeaderPhiUse(loop, m), rewrite)) {
+ return false;
+ }
+ }
+ return true;
+}
+
//
// Class methods.
//
@@ -754,6 +884,10 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveConversion(
return nullptr;
}
+//
+// Loop trip count analysis methods.
+//
+
void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
HInstruction* control = loop->GetHeader()->GetLastInstruction();
if (control->IsIf()) {
@@ -774,15 +908,16 @@ void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
if (a == nullptr || b == nullptr) {
return; // Loop control is not a sequence.
} else if (if_true->GetLoopInformation() != loop && if_false->GetLoopInformation() == loop) {
- VisitCondition(loop, a, b, type, condition->GetOppositeCondition());
+ VisitCondition(loop, if_false, a, b, type, condition->GetOppositeCondition());
} else if (if_true->GetLoopInformation() == loop && if_false->GetLoopInformation() != loop) {
- VisitCondition(loop, a, b, type, condition->GetCondition());
+ VisitCondition(loop, if_true, a, b, type, condition->GetCondition());
}
}
}
}
void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
+ HBasicBlock* body,
InductionInfo* a,
InductionInfo* b,
DataType::Type type,
@@ -790,11 +925,11 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
if (a->induction_class == kInvariant && b->induction_class == kLinear) {
// Swap condition if induction is at right-hand-side (e.g. U > i is same as i < U).
switch (cmp) {
- case kCondLT: VisitCondition(loop, b, a, type, kCondGT); break;
- case kCondLE: VisitCondition(loop, b, a, type, kCondGE); break;
- case kCondGT: VisitCondition(loop, b, a, type, kCondLT); break;
- case kCondGE: VisitCondition(loop, b, a, type, kCondLE); break;
- case kCondNE: VisitCondition(loop, b, a, type, kCondNE); break;
+ case kCondLT: VisitCondition(loop, body, b, a, type, kCondGT); break;
+ case kCondLE: VisitCondition(loop, body, b, a, type, kCondGE); break;
+ case kCondGT: VisitCondition(loop, body, b, a, type, kCondLT); break;
+ case kCondGE: VisitCondition(loop, body, b, a, type, kCondLE); break;
+ case kCondNE: VisitCondition(loop, body, b, a, type, kCondNE); break;
default: break;
}
} else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
@@ -802,24 +937,30 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
InductionInfo* lower_expr = a->op_b;
InductionInfo* upper_expr = b;
InductionInfo* stride_expr = a->op_a;
- // Constant stride?
+ // Test for constant stride and integral condition.
int64_t stride_value = 0;
if (!IsExact(stride_expr, &stride_value)) {
- return;
+ return; // unknown stride
+ } else if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) {
+ return; // not integral
}
- // Rewrite condition i != U into strict end condition i < U or i > U if this end condition
- // is reached exactly (tested by verifying if the loop has a unit stride and the non-strict
- // condition would be always taken).
+    // Since loops with an i != U condition will not be normalized by the method below, first
+ // try to rewrite a break-loop with terminating condition i != U into an equivalent loop
+ // with non-strict end condition i <= U or i >= U if such a rewriting is possible and safe.
+ if (cmp == kCondNE && RewriteBreakLoop(loop, body, stride_value, type)) {
+ cmp = stride_value > 0 ? kCondLE : kCondGE;
+ }
+ // If this rewriting failed, try to rewrite condition i != U into strict end condition i < U
+ // or i > U if this end condition is reached exactly (tested by verifying if the loop has a
+    // unit stride and the non-strict condition would always be taken).
if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLE)) ||
(stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGE)))) {
cmp = stride_value > 0 ? kCondLT : kCondGT;
}
- // Only accept integral condition. A mismatch between the type of condition and the induction
- // is only allowed if the, necessarily narrower, induction range fits the narrower control.
- if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) {
- return; // not integral
- } else if (type != a->type &&
- !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) {
+    // A mismatch between the type of the condition and the induction is only allowed
+    // if the (necessarily narrower) induction range fits the narrower control.
+ if (type != a->type &&
+ !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) {
return; // mismatched type
}
// Normalize a linear loop control with a nonzero stride:
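To make the new break-loop rewriting concrete, a hypothetical source-level example (assumed for illustration; it is not part of this patch). A break-loop runs its header statements, then tests the exit condition, and only then increments:

    int i = lo;
    while (true) {
      s += i;               // header statements, executed for i = lo .. U
      if (i == U) break;    // terminating condition, i != U in HIR
      i++;                  // loop body: update of i only
    }

When the rewriting is safe (unit stride, finite bound, entry always taken or guarded, no disallowed uses outside the loop), RewriteBreakLoop turns this into the normalized counted loop

    for (int i = lo; i <= U; i++) { s += i; }

with outside uses of i replaced by the exit value U, so the trip-count normalization below proceeds with the non-strict condition kCondLE.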
@@ -984,6 +1125,69 @@ bool HInductionVarAnalysis::FitsNarrowerControl(InductionInfo* lower_expr,
IsAtMost(upper_expr, &value) && value <= max;
}
+bool HInductionVarAnalysis::RewriteBreakLoop(HLoopInformation* loop,
+ HBasicBlock* body,
+ int64_t stride_value,
+ DataType::Type type) {
+ // Only accept unit stride.
+ if (std::abs(stride_value) != 1) {
+ return false;
+ }
+ // Simple terminating i != U condition, used nowhere else.
+ HIf* ifs = loop->GetHeader()->GetLastInstruction()->AsIf();
+ HInstruction* cond = ifs->InputAt(0);
+ if (ifs->GetPrevious() != cond || !cond->HasOnlyOneNonEnvironmentUse()) {
+ return false;
+ }
+ int c = LookupInfo(loop, cond->InputAt(0))->induction_class == kLinear ? 0 : 1;
+ HInstruction* index = cond->InputAt(c);
+ HInstruction* upper = cond->InputAt(1 - c);
+ // Safe to rewrite into i <= U?
+ IfCondition cmp = stride_value > 0 ? kCondLE : kCondGE;
+ if (!index->IsPhi() || !IsFinite(LookupInfo(loop, upper), stride_value, type, cmp)) {
+ return false;
+ }
+ // Body consists of update to index i only, used nowhere else.
+ if (body->GetSuccessors().size() != 1 ||
+ body->GetSingleSuccessor() != loop->GetHeader() ||
+ !body->GetPhis().IsEmpty() ||
+ body->GetInstructions().IsEmpty() ||
+ body->GetFirstInstruction() != index->InputAt(1) ||
+ !body->GetFirstInstruction()->HasOnlyOneNonEnvironmentUse() ||
+ !body->GetFirstInstruction()->GetNext()->IsGoto()) {
+ return false;
+ }
+ // Always taken or guarded by enclosing condition.
+ if (!IsTaken(LookupInfo(loop, index)->op_b, LookupInfo(loop, upper), cmp) &&
+ !IsGuardedBy(loop, cmp, index->InputAt(0), upper)) {
+ return false;
+ }
+  // Test if the break-loop body can be rewritten, and do so on success.
+ if (RewriteBreakLoopBody(loop, body, cond, index, upper, /*rewrite*/ false)) {
+ RewriteBreakLoopBody(loop, body, cond, index, upper, /*rewrite*/ true);
+ } else {
+ return false;
+ }
+ // Rewrite condition in HIR.
+ if (ifs->IfTrueSuccessor() != body) {
+ cmp = (cmp == kCondLE) ? kCondGT : kCondLT;
+ }
+ HInstruction* rep = nullptr;
+ switch (cmp) {
+ case kCondLT: rep = new (graph_->GetAllocator()) HLessThan(index, upper); break;
+ case kCondGT: rep = new (graph_->GetAllocator()) HGreaterThan(index, upper); break;
+ case kCondLE: rep = new (graph_->GetAllocator()) HLessThanOrEqual(index, upper); break;
+ case kCondGE: rep = new (graph_->GetAllocator()) HGreaterThanOrEqual(index, upper); break;
+ default: LOG(FATAL) << cmp; UNREACHABLE();
+ }
+ loop->GetHeader()->ReplaceAndRemoveInstructionWith(cond, rep);
+ return true;
+}
+
+//
+// Helper methods.
+//
+
void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
HInstruction* instruction,
InductionInfo* info) {
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index 8737b890d9..acad77d35f 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -195,9 +195,14 @@ class HInductionVarAnalysis : public HOptimization {
HInstruction* entry_phi,
HTypeConversion* conversion);
+ //
+ // Loop trip count analysis methods.
+ //
+
// Trip count information.
void VisitControl(HLoopInformation* loop);
void VisitCondition(HLoopInformation* loop,
+ HBasicBlock* body,
InductionInfo* a,
InductionInfo* b,
DataType::Type type,
@@ -219,6 +224,14 @@ class HInductionVarAnalysis : public HOptimization {
int64_t stride_value,
DataType::Type type,
IfCondition cmp);
+ bool RewriteBreakLoop(HLoopInformation* loop,
+ HBasicBlock* body,
+ int64_t stride_value,
+ DataType::Type type);
+
+ //
+ // Helper methods.
+ //
// Assign and lookup.
void AssignInfo(HLoopInformation* loop, HInstruction* instruction, InductionInfo* info);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3f4a3d8b8e..2444e43d64 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -441,9 +441,9 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
// Add dependency due to devirtualization. We've assumed resolved_method
// has a single implementation.
outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
- MaybeRecordStat(stats_, kCHAInline);
+ MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
} else {
- MaybeRecordStat(stats_, kInlinedInvokeVirtualOrInterface);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
}
}
return result;
@@ -533,7 +533,7 @@ bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
}
case kInlineCacheMonomorphic: {
- MaybeRecordStat(stats_, kMonomorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kMonomorphicCall);
if (UseOnlyPolymorphicInliningWithNoDeopt()) {
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
} else {
@@ -542,7 +542,7 @@ bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
}
case kInlineCachePolymorphic: {
- MaybeRecordStat(stats_, kPolymorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kPolymorphicCall);
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
}
@@ -551,7 +551,7 @@ bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
<< "Interface or virtual call to "
<< caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
<< " is megamorphic and not inlined";
- MaybeRecordStat(stats_, kMegamorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kMegamorphicCall);
return false;
}
@@ -755,7 +755,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
dex::TypeIndex class_index = FindClassIndexIn(
GetMonomorphicType(classes), caller_compilation_unit_);
if (!class_index.IsValid()) {
- LOG_FAIL(stats_, kNotInlinedDexCache)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedDexCache)
<< "Call to " << ArtMethod::PrettyMethod(resolved_method)
<< " from inline cache is not inlined because its class is not"
<< " accessible to the caller";
@@ -804,7 +804,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
/* is_first_run */ false);
rtp_fixup.Run();
- MaybeRecordStat(stats_, kInlinedMonomorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedMonomorphicCall);
return true;
}
@@ -994,7 +994,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
return false;
}
- MaybeRecordStat(stats_, kInlinedPolymorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall);
// Run type propagation to get the guards typed.
ReferenceTypePropagation rtp_fixup(graph_,
@@ -1200,7 +1200,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
/* is_first_run */ false);
rtp_fixup.Run();
- MaybeRecordStat(stats_, kInlinedPolymorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall);
LOG_SUCCESS() << "Inlined same polymorphic target " << actual_method->PrettyMethod();
return true;
@@ -1258,6 +1258,13 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
}
return_replacement = new_invoke;
+ // Directly check if the new virtual can be recognized as an intrinsic.
+ // This way, we avoid running a full recognition pass just to detect
+      // these relatively rare cases.
+ bool wrong_invoke_type = false;
+ if (IntrinsicsRecognizer::Recognize(new_invoke, &wrong_invoke_type)) {
+ MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
+ }
} else {
// TODO: Consider sharpening an invoke virtual once it is not dependent on the
// compiler driver.
@@ -1301,14 +1308,14 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
ReferenceTypeInfo receiver_type,
HInstruction** return_replacement) {
if (method->IsProxyMethod()) {
- LOG_FAIL(stats_, kNotInlinedProxy)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedProxy)
<< "Method " << method->PrettyMethod()
<< " is not inlined because of unimplemented inline support for proxy methods.";
return false;
}
if (CountRecursiveCallsOf(method) > kMaximumNumberOfRecursiveCalls) {
- LOG_FAIL(stats_, kNotInlinedRecursiveBudget)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRecursiveBudget)
<< "Method "
<< method->PrettyMethod()
<< " is not inlined because it has reached its recursive call budget.";
@@ -1322,10 +1329,10 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
if (TryPatternSubstitution(invoke_instruction, method, return_replacement)) {
LOG_SUCCESS() << "Successfully replaced pattern of invoke "
<< method->PrettyMethod();
- MaybeRecordStat(stats_, kReplacedInvokeWithSimplePattern);
+ MaybeRecordStat(stats_, MethodCompilationStat::kReplacedInvokeWithSimplePattern);
return true;
}
- LOG_FAIL(stats_, kNotInlinedWont)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedWont)
<< "Won't inline " << method->PrettyMethod() << " in "
<< outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
<< caller_compilation_unit_.GetDexFile()->GetLocation() << ") from "
@@ -1345,7 +1352,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits();
if (code_item->insns_size_in_code_units_ > inline_max_code_units) {
- LOG_FAIL(stats_, kNotInlinedCodeItem)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
<< "Method " << method->PrettyMethod()
<< " is not inlined because its code item is too big: "
<< code_item->insns_size_in_code_units_
@@ -1355,13 +1362,13 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
}
if (code_item->tries_size_ != 0) {
- LOG_FAIL(stats_, kNotInlinedTryCatch)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
<< "Method " << method->PrettyMethod() << " is not inlined because of try block";
return false;
}
if (!method->IsCompilable()) {
- LOG_FAIL(stats_, kNotInlinedNotVerified)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
<< "Method " << method->PrettyMethod()
<< " has soft failures un-handled by the compiler, so it cannot be inlined";
}
@@ -1371,7 +1378,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
if (Runtime::Current()->UseJitCompilation() ||
!compiler_driver_->IsMethodVerifiedWithoutFailures(
method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
- LOG_FAIL(stats_, kNotInlinedNotVerified)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
<< "Method " << method->PrettyMethod()
<< " couldn't be verified, so it cannot be inlined";
return false;
@@ -1382,9 +1389,10 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
invoke_instruction->AsInvokeStaticOrDirect()->IsStaticWithImplicitClinitCheck()) {
// Case of a static method that cannot be inlined because it implicitly
// requires an initialization check of its declaring class.
- LOG_FAIL(stats_, kNotInlinedDexCache) << "Method " << method->PrettyMethod()
- << " is not inlined because it is static and requires a clinit"
- << " check that cannot be emitted due to Dex cache limitations";
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedDexCache)
+ << "Method " << method->PrettyMethod()
+ << " is not inlined because it is static and requires a clinit"
+ << " check that cannot be emitted due to Dex cache limitations";
return false;
}
@@ -1394,7 +1402,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
}
LOG_SUCCESS() << method->PrettyMethod();
- MaybeRecordStat(stats_, kInlinedInvoke);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvoke);
return true;
}
@@ -1677,7 +1685,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
handles_);
if (builder.BuildGraph() != kAnalysisSuccess) {
- LOG_FAIL(stats_, kNotInlinedCannotBuild)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCannotBuild)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be built, so cannot be inlined";
return false;
@@ -1685,7 +1693,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
if (!RegisterAllocator::CanAllocateRegistersFor(*callee_graph,
compiler_driver_->GetInstructionSet())) {
- LOG_FAIL(stats_, kNotInlinedRegisterAllocator)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRegisterAllocator)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " cannot be inlined because of the register allocator";
return false;
@@ -1738,7 +1746,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
HBasicBlock* exit_block = callee_graph->GetExitBlock();
if (exit_block == nullptr) {
- LOG_FAIL(stats_, kNotInlinedInfiniteLoop)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedInfiniteLoop)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it has an infinite loop";
return false;
@@ -1749,14 +1757,14 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
if (predecessor->GetLastInstruction()->IsThrow()) {
if (invoke_instruction->GetBlock()->IsTryBlock()) {
// TODO(ngeoffray): Support adding HTryBoundary in Hgraph::InlineInto.
- LOG_FAIL(stats_, kNotInlinedTryCatch)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because one branch always throws and"
<< " caller is in a try/catch block";
return false;
} else if (graph_->GetExitBlock() == nullptr) {
// TODO(ngeoffray): Support adding HExit in the caller graph.
- LOG_FAIL(stats_, kNotInlinedInfiniteLoop)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedInfiniteLoop)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because one branch always throws and"
<< " caller does not have an exit block";
@@ -1775,7 +1783,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
}
if (!has_one_return) {
- LOG_FAIL(stats_, kNotInlinedAlwaysThrows)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedAlwaysThrows)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it always throws";
return false;
@@ -1788,7 +1796,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
if (block->GetLoopInformation()->IsIrreducible()) {
// Don't inline methods with irreducible loops, they could prevent some
// optimizations to run.
- LOG_FAIL(stats_, kNotInlinedIrreducibleLoop)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedIrreducibleLoop)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it contains an irreducible loop";
return false;
@@ -1797,7 +1805,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
// Don't inline methods with loops without exit, since they cause the
// loop information to be computed incorrectly when updating after
// inlining.
- LOG_FAIL(stats_, kNotInlinedLoopWithoutExit)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedLoopWithoutExit)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it contains a loop with no exit";
return false;
@@ -1808,7 +1816,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
!instr_it.Done();
instr_it.Advance()) {
if (++number_of_instructions >= inlining_budget_) {
- LOG_FAIL(stats_, kNotInlinedInstructionBudget)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedInstructionBudget)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " is not inlined because the outer method has reached"
<< " its instruction budget limit.";
@@ -1817,7 +1825,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
HInstruction* current = instr_it.Current();
if (current->NeedsEnvironment() &&
(total_number_of_dex_registers_ >= kMaximumNumberOfCumulatedDexRegisters)) {
- LOG_FAIL(stats_, kNotInlinedEnvironmentBudget)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedEnvironmentBudget)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " is not inlined because its caller has reached"
<< " its environment budget limit.";
@@ -1827,7 +1835,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
if (current->NeedsEnvironment() &&
!CanEncodeInlinedMethodInStackMap(*caller_compilation_unit_.GetDexFile(),
resolved_method)) {
- LOG_FAIL(stats_, kNotInlinedStackMaps)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedStackMaps)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because " << current->DebugName()
<< " needs an environment, is in a different dex file"
@@ -1836,7 +1844,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
}
if (!same_dex_file && current->NeedsDexCacheOfDeclaringClass()) {
- LOG_FAIL(stats_, kNotInlinedDexCache)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedDexCache)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because " << current->DebugName()
<< " it is in a different dex file and requires access to the dex cache";
@@ -1848,7 +1856,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
current->IsUnresolvedStaticFieldSet() ||
current->IsUnresolvedInstanceFieldSet()) {
// Entrypoint for unresolved fields does not handle inlined frames.
- LOG_FAIL(stats_, kNotInlinedUnresolvedEntrypoint)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedUnresolvedEntrypoint)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it is using an unresolved"
<< " entrypoint";
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 61840cc20f..978d0c2225 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -29,6 +29,7 @@
#include "driver/compiler_options.h"
#include "imtable-inl.h"
#include "mirror/dex_cache.h"
+#include "oat_file.h"
#include "optimizing_compiler_stats.h"
#include "quicken_info.h"
#include "scoped_thread_state_change-inl.h"
@@ -447,7 +448,8 @@ ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
/* expandable */ false,
kArenaAllocGraphBuilder);
locations->ClearAllBits();
- dex_file_->DecodeDebugPositionInfo(code_item_, Callback::Position, locations);
+ uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*dex_file_, code_item_);
+ dex_file_->DecodeDebugPositionInfo(code_item_, debug_info_offset, Callback::Position, locations);
// Instruction-specific tweaks.
IterationRange<DexInstructionIterator> instructions = code_item_->Instructions();
for (const DexInstructionPcPair& inst : instructions) {
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 4c18e16c48..bd20d28992 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -48,7 +48,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void RecordSimplification() {
simplification_occurred_ = true;
simplifications_at_current_position_++;
- MaybeRecordStat(stats_, kInstructionSimplifications);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplifications);
}
bool ReplaceRotateWithRor(HBinaryOperation* op, HUShr* ushr, HShl* shl);
@@ -663,7 +663,7 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
HGraph* graph = GetGraph();
if (object->IsNullConstant()) {
- MaybeRecordStat(stats_, kRemovedInstanceOf);
+ MaybeRecordStat(stats_, MethodCompilationStat::kRemovedInstanceOf);
instruction->ReplaceWith(graph->GetIntConstant(0));
instruction->GetBlock()->RemoveInstruction(instruction);
RecordSimplification();
@@ -674,7 +674,7 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
// the return value check with the `outcome` check, b/27651442 .
bool outcome = false;
if (TypeCheckHasKnownOutcome(load_class, object, &outcome)) {
- MaybeRecordStat(stats_, kRemovedInstanceOf);
+ MaybeRecordStat(stats_, MethodCompilationStat::kRemovedInstanceOf);
if (outcome && can_be_null) {
// Type test will succeed, we just need a null test.
HNotEqual* test = new (graph->GetAllocator()) HNotEqual(graph->GetNullConstant(), object);
@@ -1168,6 +1168,16 @@ void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruct
RecordSimplification();
return;
}
+ } else if (input->IsIntConstant()) {
+    // Try to eliminate the type conversion on an int constant whose value
+    // falls within the range of the result type.
+ int32_t value = input->AsIntConstant()->GetValue();
+ if (DataType::IsTypeConversionImplicit(value, result_type)) {
+ instruction->ReplaceWith(input);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ RecordSimplification();
+ return;
+ }
}
}
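As a hedged illustration of the new constant case (the instruction names below are made up for the example):

    // i0: IntConstant 5
    // i1: TypeConversion(i0) [int -> short]
    // Since 5 lies within [-32768, 32767], IsTypeConversionImplicit(5, kInt16)
    // holds, so i1 is removed and its uses are rewired directly to i0. A
    // constant such as 70000 would not qualify and keeps its conversion.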
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index d41e49a0f3..92081e30b1 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -37,9 +37,7 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
private:
void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
}
bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 69e1463ac4..1c44e5ac49 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -37,9 +37,7 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
private:
void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
}
bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index 6a0d8a60c4..fa97401a0c 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -33,9 +33,7 @@ class InstructionSimplifierMipsVisitor : public HGraphVisitor {
private:
void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
}
bool TryExtractArrayAccessIndex(HInstruction* access,
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index dfae534555..77199242f5 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -104,7 +104,8 @@ static inline IntrinsicExceptions GetExceptions(Intrinsics i) {
return kCanThrow;
}
-static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) {
+static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Whenever the intrinsic is marked as static, report an error if we find an InvokeVirtual.
//
// Whenever the intrinsic is marked as direct and we find an InvokeVirtual, a devirtualization
@@ -130,7 +131,6 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) {
}
if (invoke_type == kVirtual) {
ArtMethod* art_method = invoke->GetResolvedMethod();
- ScopedObjectAccess soa(Thread::Current());
return (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal());
}
return false;
@@ -139,9 +139,39 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) {
// Call might be devirtualized.
return (invoke_type == kVirtual || invoke_type == kDirect);
- default:
+ case kSuper:
+ case kInterface:
+ case kPolymorphic:
return false;
}
+ LOG(FATAL) << "Unknown intrinsic invoke type: " << intrinsic_type;
+ UNREACHABLE();
+}
+
+bool IntrinsicsRecognizer::Recognize(HInvoke* invoke, /*out*/ bool* wrong_invoke_type) {
+ ArtMethod* art_method = invoke->GetResolvedMethod();
+ *wrong_invoke_type = false;
+ if (art_method == nullptr || !art_method->IsIntrinsic()) {
+ return false;
+ }
+
+ // TODO: b/65872996 The intent is that polymorphic signature methods should
+ // be compiler intrinsics. At present, they are only interpreter intrinsics.
+ if (art_method->IsPolymorphicSignature()) {
+ return false;
+ }
+
+ Intrinsics intrinsic = static_cast<Intrinsics>(art_method->GetIntrinsic());
+  if (!CheckInvokeType(intrinsic, invoke)) {
+ *wrong_invoke_type = true;
+ return false;
+ }
+
+ invoke->SetIntrinsic(intrinsic,
+ NeedsEnvironmentOrCache(intrinsic),
+ GetSideEffects(intrinsic),
+ GetExceptions(intrinsic));
+ return true;
}
void IntrinsicsRecognizer::Run() {
@@ -151,23 +181,14 @@ void IntrinsicsRecognizer::Run() {
inst_it.Advance()) {
HInstruction* inst = inst_it.Current();
if (inst->IsInvoke()) {
- HInvoke* invoke = inst->AsInvoke();
- ArtMethod* art_method = invoke->GetResolvedMethod();
- if (art_method != nullptr && art_method->IsIntrinsic()) {
- Intrinsics intrinsic = static_cast<Intrinsics>(art_method->GetIntrinsic());
- if (!CheckInvokeType(intrinsic, invoke)) {
- LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
- << static_cast<uint32_t>(intrinsic) << " for "
- << art_method->PrettyMethod()
- << invoke->DebugName();
- } else {
- invoke->SetIntrinsic(intrinsic,
- NeedsEnvironmentOrCache(intrinsic),
- GetSideEffects(intrinsic),
- GetExceptions(intrinsic));
- MaybeRecordStat(stats_,
- MethodCompilationStat::kIntrinsicRecognized);
- }
+ bool wrong_invoke_type = false;
+ if (Recognize(inst->AsInvoke(), &wrong_invoke_type)) {
+ MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
+ } else if (wrong_invoke_type) {
+ LOG(WARNING)
+ << "Found an intrinsic with unexpected invoke type: "
+ << inst->AsInvoke()->GetResolvedMethod()->PrettyMethod() << " "
+ << inst->DebugName();
}
}
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 818d7f63a3..c07a99032a 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -44,6 +44,12 @@ class IntrinsicsRecognizer : public HOptimization {
void Run() OVERRIDE;
+  // Static helper that recognizes an intrinsic call. Returns true on success.
+  // If it fails due to an invoke type mismatch, wrong_invoke_type is set.
+  // Useful for recognizing intrinsics on individual calls outside this full pass.
+ static bool Recognize(HInvoke* invoke, /*out*/ bool* wrong_invoke_type)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition";
private:
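A sketch of the intended call-site usage of the new static helper, mirroring the Run() loop and the inliner change above (hedged; `invoke` and `stats` are illustrative names, and callers must hold the mutator lock per the REQUIRES_SHARED annotation):

    bool wrong_invoke_type = false;
    if (IntrinsicsRecognizer::Recognize(invoke, &wrong_invoke_type)) {
      // The invoke now carries its intrinsic info; optionally record a stat.
      MaybeRecordStat(stats, MethodCompilationStat::kIntrinsicRecognized);
    } else if (wrong_invoke_type) {
      LOG(WARNING) << "Intrinsic with unexpected invoke type: "
                   << invoke->GetResolvedMethod()->PrettyMethod();
    }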
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 53f9ec413b..a281c4a310 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -112,7 +112,7 @@ class PassObserver : public ValueObject {
Mutex& dump_mutex)
: graph_(graph),
cached_method_name_(),
- timing_logger_enabled_(compiler_driver->GetDumpPasses()),
+ timing_logger_enabled_(compiler_driver->GetCompilerOptions().GetDumpTimings()),
timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
disasm_info_(graph->GetAllocator()),
visualizer_oss_(),
@@ -407,7 +407,7 @@ void OptimizingCompiler::Init() {
driver->GetCompilerOptions().GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
}
- if (driver->GetDumpStats()) {
+ if (driver->GetCompilerOptions().GetDumpStats()) {
compilation_stats_.reset(new OptimizingCompilerStats());
}
}
@@ -738,7 +738,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
ArtMethod* method,
bool osr,
VariableSizedHandleScope* handles) const {
- MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptCompilation);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
InstructionSet instruction_set = compiler_driver->GetInstructionSet();
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
@@ -757,8 +757,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledPathological);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledPathological);
return nullptr;
}
@@ -768,8 +767,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
&& (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledSpaceFilter);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledSpaceFilter);
return nullptr;
}
@@ -800,8 +798,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
compiler_driver->GetCompilerOptions(),
compilation_stats_.get()));
if (codegen.get() == nullptr) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledNoCodegen);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
codegen->GetAssembler()->cfi().SetEnabled(
@@ -873,6 +870,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
codegen->Compile(code_allocator);
pass_observer.DumpDisassembly();
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledBytecode);
return codegen.release();
}
@@ -883,6 +881,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
VariableSizedHandleScope* handles) const {
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptIntrinsicCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
InstructionSet instruction_set = compiler_driver->GetInstructionSet();
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
@@ -894,8 +893,6 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
// Do not attempt to compile on architectures we do not support.
if (!IsInstructionSetSupported(instruction_set)) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledUnsupportedIsa);
return nullptr;
}
@@ -920,8 +917,6 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
compiler_driver->GetCompilerOptions(),
compilation_stats_.get()));
if (codegen.get() == nullptr) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
codegen->GetAssembler()->cfi().SetEnabled(
@@ -979,6 +974,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
VLOG(compiler) << "Compiled intrinsic: " << method->GetIntrinsic()
<< " " << graph->PrettyMethod();
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledIntrinsic);
return codegen.release();
}
@@ -1046,8 +1042,6 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
}
if (codegen.get() != nullptr) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kCompiled);
compiled_method = Emit(&allocator,
&code_allocator,
codegen.get(),
@@ -1139,10 +1133,20 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
}
}
- return ArtQuickJniCompileMethod(GetCompilerDriver(),
- access_flags,
- method_idx,
- dex_file);
+ JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
+ GetCompilerDriver(), access_flags, method_idx, dex_file);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
+ return CompiledMethod::SwapAllocCompiledMethod(
+ GetCompilerDriver(),
+ jni_compiled_method.GetInstructionSet(),
+ jni_compiled_method.GetCode(),
+ jni_compiled_method.GetFrameSize(),
+ jni_compiled_method.GetCoreSpillMask(),
+ jni_compiled_method.GetFpSpillMask(),
+ /* method_info */ ArrayRef<const uint8_t>(),
+ /* vmap_table */ ArrayRef<const uint8_t>(),
+ jni_compiled_method.GetCfi(),
+ /* patches */ ArrayRef<const linker::LinkerPatch>());
}
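The accessors used at this call site imply the following shape for JniCompiledMethod;
this is reconstructed from usage, not the actual declaration in jni/quick/jni_compiler.h:

    // Hypothetical interface, inferred from the call above.
    class JniCompiledMethod {
     public:
      InstructionSet GetInstructionSet() const;
      ArrayRef<const uint8_t> GetCode() const;
      uint32_t GetFrameSize() const;
      uint32_t GetCoreSpillMask() const;
      uint32_t GetFpSpillMask() const;
      ArrayRef<const uint8_t> GetCfi() const;
    };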
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
@@ -1237,6 +1241,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
self, class_linker->GetClassRoot(ClassLinker::kObjectArrayClass), number_of_roots)));
if (roots == nullptr) {
// Out of memory; just clear the exception to avoid leaving an uncaught Java exception behind.
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
DCHECK(self->IsExceptionPending());
self->ClearException();
return false;
@@ -1253,9 +1258,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
&method_info_data,
&roots_data);
if (stack_map_data == nullptr || roots_data == nullptr) {
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
return false;
}
- MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiled);
codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size),
MemoryRegion(method_info_data, method_info_size),
code_item);
@@ -1279,6 +1284,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
codegen->GetGraph()->GetCHASingleImplementationList());
if (code == nullptr) {
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
code_cache->ClearData(self, stack_map_data, roots_data);
return false;
}
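All three commit-time out-of-memory paths in JitCompile() now feed a single counter.
Summarizing the hunks above:

    // kJitOutOfMemoryForCommit is recorded when:
    //   1. allocating the roots ObjectArray fails,
    //   2. reserving stack map / method info / roots data in the code cache fails,
    //   3. committing the compiled code to the JIT code cache fails.
    // The generic kCompiled stat recorded here previously is gone; a successful
    // JIT compilation is now counted as kCompiledBytecode inside TryCompile().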
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 07f9635aba..a2e92d2931 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -27,10 +27,13 @@
namespace art {
-enum MethodCompilationStat {
- kAttemptCompilation = 0,
+enum class MethodCompilationStat {
+ kAttemptBytecodeCompilation = 0,
+ kAttemptIntrinsicCompilation,
+ kCompiledNativeStub,
+ kCompiledIntrinsic,
+ kCompiledBytecode,
kCHAInline,
- kCompiled,
kInlinedInvoke,
kReplacedInvokeWithSimplePattern,
kInstructionSimplifications,
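The two coarse counters are split into finer-grained ones; the mapping is:

    // Old counter         -> new counters
    // kAttemptCompilation -> kAttemptBytecodeCompilation,
    //                        kAttemptIntrinsicCompilation
    // kCompiled           -> kCompiledBytecode,
    //                        kCompiledNativeStub,
    //                        kCompiledIntrinsic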
@@ -94,8 +97,10 @@ enum MethodCompilationStat {
kConstructorFenceRemovedLSE,
kConstructorFenceRemovedPFRA,
kConstructorFenceRemovedCFRE,
+ kJitOutOfMemoryForCommit,
kLastStat
};
+std::ostream& operator<<(std::ostream& os, const MethodCompilationStat& rhs);
class OptimizingCompilerStats {
public:
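Only the operator<< declaration appears in this header, so its definition is presumably
provided elsewhere (for example, generated from the enumerator names). A sketch of the
expected behavior, not the actual implementation:

    // Prints the enumerator name, e.g. "CompiledBytecode".
    std::ostream& operator<<(std::ostream& os, const MethodCompilationStat& rhs) {
      switch (rhs) {
        case MethodCompilationStat::kCompiledBytecode:
          return os << "CompiledBytecode";
        // ... one case per enumerator ...
        default:
          return os << "MethodCompilationStat[" << static_cast<size_t>(rhs) << "]";
      }
    }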
@@ -105,7 +110,15 @@ class OptimizingCompilerStats {
}
void RecordStat(MethodCompilationStat stat, uint32_t count = 1) {
- compile_stats_[stat] += count;
+ size_t stat_index = static_cast<size_t>(stat);
+ DCHECK_LT(stat_index, arraysize(compile_stats_));
+ compile_stats_[stat_index] += count;
+ }
+
+ uint32_t GetStat(MethodCompilationStat stat) const {
+ size_t stat_index = static_cast<size_t>(stat);
+ DCHECK_LT(stat_index, arraysize(compile_stats_));
+ return compile_stats_[stat_index];
}
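With the scoped enum, values no longer convert implicitly to array indices, hence the
explicit static_cast in both accessors. Call sites stay simple:

    OptimizingCompilerStats stats;
    stats.RecordStat(MethodCompilationStat::kCompiledBytecode);      // count = 1
    stats.RecordStat(MethodCompilationStat::kCompiledBytecode, 4u);  // count = 4
    // GetStat() performs an atomic load of the accumulated value: 5.
    uint32_t n = stats.GetStat(MethodCompilationStat::kCompiledBytecode);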
void Log() const {
@@ -114,18 +127,29 @@ class OptimizingCompilerStats {
return;
}
- if (compile_stats_[kAttemptCompilation] == 0) {
+ uint32_t compiled_intrinsics = GetStat(MethodCompilationStat::kCompiledIntrinsic);
+ uint32_t compiled_native_stubs = GetStat(MethodCompilationStat::kCompiledNativeStub);
+ uint32_t bytecode_attempts =
+ GetStat(MethodCompilationStat::kAttemptBytecodeCompilation);
+ if (compiled_intrinsics == 0u && compiled_native_stubs == 0u && bytecode_attempts == 0u) {
LOG(INFO) << "Did not compile any method.";
} else {
- float compiled_percent =
- compile_stats_[kCompiled] * 100.0f / compile_stats_[kAttemptCompilation];
- LOG(INFO) << "Attempted compilation of " << compile_stats_[kAttemptCompilation]
- << " methods: " << std::fixed << std::setprecision(2)
- << compiled_percent << "% (" << compile_stats_[kCompiled] << ") compiled.";
-
- for (size_t i = 0; i < kLastStat; i++) {
+ uint32_t compiled_bytecode_methods =
+ GetStat(MethodCompilationStat::kCompiledBytecode);
+ // A successful intrinsic compilation preempts other compilation attempts, but a failed
+ // intrinsic compilation still counts towards the bytecode or native stub attempts.
+ uint32_t num_compilation_attempts =
+ compiled_intrinsics + compiled_native_stubs + bytecode_attempts;
+ uint32_t num_successful_compilations =
+ compiled_intrinsics + compiled_native_stubs + compiled_bytecode_methods;
+ float compiled_percent = num_successful_compilations * 100.0f / num_compilation_attempts;
+ LOG(INFO) << "Attempted compilation of "
+ << num_compilation_attempts << " methods: " << std::fixed << std::setprecision(2)
+ << compiled_percent << "% (" << num_successful_compilations << ") compiled.";
+
+ for (size_t i = 0; i < arraysize(compile_stats_); ++i) {
if (compile_stats_[i] != 0) {
- LOG(INFO) << PrintMethodCompilationStat(static_cast<MethodCompilationStat>(i)) << ": "
+ LOG(INFO) << "OptStat#" << static_cast<MethodCompilationStat>(i) << ": "
<< compile_stats_[i];
}
}
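A worked example of the new percentage, with made-up numbers:

    // 10 compiled intrinsics, 5 compiled native stubs, and 85 bytecode
    // attempts of which 80 succeeded:
    //   num_compilation_attempts    = 10 + 5 + 85 = 100
    //   num_successful_compilations = 10 + 5 + 80 = 95
    //   compiled_percent            = 95 * 100.0f / 100 = 95.00%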
@@ -133,7 +157,7 @@ class OptimizingCompilerStats {
}
void AddTo(OptimizingCompilerStats* other_stats) {
- for (size_t i = 0; i != kLastStat; ++i) {
+ for (size_t i = 0; i != arraysize(compile_stats_); ++i) {
uint32_t count = compile_stats_[i];
if (count != 0) {
other_stats->RecordStat(static_cast<MethodCompilationStat>(i), count);
@@ -142,91 +166,13 @@ class OptimizingCompilerStats {
}
void Reset() {
- for (size_t i = 0; i != kLastStat; ++i) {
- compile_stats_[i] = 0u;
+ for (std::atomic<uint32_t>& stat : compile_stats_) {
+ stat = 0u;
}
}
private:
- std::string PrintMethodCompilationStat(MethodCompilationStat stat) const {
- std::string name;
- switch (stat) {
- case kAttemptCompilation : name = "AttemptCompilation"; break;
- case kCHAInline : name = "CHAInline"; break;
- case kCompiled : name = "Compiled"; break;
- case kInlinedInvoke : name = "InlinedInvoke"; break;
- case kReplacedInvokeWithSimplePattern: name = "ReplacedInvokeWithSimplePattern"; break;
- case kInstructionSimplifications: name = "InstructionSimplifications"; break;
- case kInstructionSimplificationsArch: name = "InstructionSimplificationsArch"; break;
- case kUnresolvedMethod : name = "UnresolvedMethod"; break;
- case kUnresolvedField : name = "UnresolvedField"; break;
- case kUnresolvedFieldNotAFastAccess : name = "UnresolvedFieldNotAFastAccess"; break;
- case kRemovedCheckedCast: name = "RemovedCheckedCast"; break;
- case kRemovedDeadInstruction: name = "RemovedDeadInstruction"; break;
- case kRemovedNullCheck: name = "RemovedNullCheck"; break;
- case kNotCompiledSkipped: name = "NotCompiledSkipped"; break;
- case kNotCompiledInvalidBytecode: name = "NotCompiledInvalidBytecode"; break;
- case kNotCompiledThrowCatchLoop : name = "NotCompiledThrowCatchLoop"; break;
- case kNotCompiledAmbiguousArrayOp : name = "NotCompiledAmbiguousArrayOp"; break;
- case kNotCompiledHugeMethod : name = "NotCompiledHugeMethod"; break;
- case kNotCompiledLargeMethodNoBranches : name = "NotCompiledLargeMethodNoBranches"; break;
- case kNotCompiledMalformedOpcode : name = "NotCompiledMalformedOpcode"; break;
- case kNotCompiledNoCodegen : name = "NotCompiledNoCodegen"; break;
- case kNotCompiledPathological : name = "NotCompiledPathological"; break;
- case kNotCompiledSpaceFilter : name = "NotCompiledSpaceFilter"; break;
- case kNotCompiledUnhandledInstruction : name = "NotCompiledUnhandledInstruction"; break;
- case kNotCompiledUnsupportedIsa : name = "NotCompiledUnsupportedIsa"; break;
- case kNotCompiledVerificationError : name = "NotCompiledVerificationError"; break;
- case kNotCompiledVerifyAtRuntime : name = "NotCompiledVerifyAtRuntime"; break;
- case kInlinedMonomorphicCall: name = "InlinedMonomorphicCall"; break;
- case kInlinedPolymorphicCall: name = "InlinedPolymorphicCall"; break;
- case kMonomorphicCall: name = "MonomorphicCall"; break;
- case kPolymorphicCall: name = "PolymorphicCall"; break;
- case kMegamorphicCall: name = "MegamorphicCall"; break;
- case kBooleanSimplified : name = "BooleanSimplified"; break;
- case kIntrinsicRecognized : name = "IntrinsicRecognized"; break;
- case kLoopInvariantMoved : name = "LoopInvariantMoved"; break;
- case kLoopVectorized : name = "LoopVectorized"; break;
- case kLoopVectorizedIdiom : name = "LoopVectorizedIdiom"; break;
- case kSelectGenerated : name = "SelectGenerated"; break;
- case kRemovedInstanceOf: name = "RemovedInstanceOf"; break;
- case kInlinedInvokeVirtualOrInterface: name = "InlinedInvokeVirtualOrInterface"; break;
- case kImplicitNullCheckGenerated: name = "ImplicitNullCheckGenerated"; break;
- case kExplicitNullCheckGenerated: name = "ExplicitNullCheckGenerated"; break;
- case kSimplifyIf: name = "SimplifyIf"; break;
- case kInstructionSunk: name = "InstructionSunk"; break;
- case kNotInlinedUnresolvedEntrypoint: name = "NotInlinedUnresolvedEntrypoint"; break;
- case kNotInlinedDexCache: name = "NotInlinedDexCache"; break;
- case kNotInlinedStackMaps: name = "NotInlinedStackMaps"; break;
- case kNotInlinedEnvironmentBudget: name = "NotInlinedEnvironmentBudget"; break;
- case kNotInlinedInstructionBudget: name = "NotInlinedInstructionBudget"; break;
- case kNotInlinedLoopWithoutExit: name = "NotInlinedLoopWithoutExit"; break;
- case kNotInlinedIrreducibleLoop: name = "NotInlinedIrreducibleLoop"; break;
- case kNotInlinedAlwaysThrows: name = "NotInlinedAlwaysThrows"; break;
- case kNotInlinedInfiniteLoop: name = "NotInlinedInfiniteLoop"; break;
- case kNotInlinedTryCatch: name = "NotInlinedTryCatch"; break;
- case kNotInlinedRegisterAllocator: name = "NotInlinedRegisterAllocator"; break;
- case kNotInlinedCannotBuild: name = "NotInlinedCannotBuild"; break;
- case kNotInlinedNotVerified: name = "NotInlinedNotVerified"; break;
- case kNotInlinedCodeItem: name = "NotInlinedCodeItem"; break;
- case kNotInlinedWont: name = "NotInlinedWont"; break;
- case kNotInlinedRecursiveBudget: name = "NotInlinedRecursiveBudget"; break;
- case kNotInlinedProxy: name = "NotInlinedProxy"; break;
- case kConstructorFenceGeneratedNew: name = "ConstructorFenceGeneratedNew"; break;
- case kConstructorFenceGeneratedFinal: name = "ConstructorFenceGeneratedFinal"; break;
- case kConstructorFenceRemovedLSE: name = "ConstructorFenceRemovedLSE"; break;
- case kConstructorFenceRemovedPFRA: name = "ConstructorFenceRemovedPFRA"; break;
- case kConstructorFenceRemovedCFRE: name = "ConstructorFenceRemovedCFRE"; break;
-
- case kLastStat:
- LOG(FATAL) << "invalid stat "
- << static_cast<std::underlying_type<MethodCompilationStat>::type>(stat);
- UNREACHABLE();
- }
- return "OptStat#" + name;
- }
-
- std::atomic<uint32_t> compile_stats_[kLastStat];
+ std::atomic<uint32_t> compile_stats_[static_cast<size_t>(MethodCompilationStat::kLastStat)];
DISALLOW_COPY_AND_ASSIGN(OptimizingCompilerStats);
};
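The counter array relies on std::atomic semantics throughout: += in RecordStat() is an
atomic fetch-add, assignment in Reset() is an atomic store, and the reads in Log() and
AddTo() are atomic loads. A self-contained illustration of the same three operations:

    #include <atomic>
    #include <cstdint>

    int main() {
      std::atomic<uint32_t> counter{0u};  // constexpr atomic(T) constructor
      counter += 2u;                      // atomic fetch_add, as in RecordStat()
      uint32_t seen = counter;            // atomic load, as in Log()/AddTo()
      counter = 0u;                       // atomic store, as in Reset()
      return (seen == 2u && counter == 0u) ? 0 : 1;  // exits 0 on success
    }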