Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.bp | 150
-rw-r--r--  compiler/compiled_method.h | 2
-rw-r--r--  compiler/compiler.h | 4
-rw-r--r--  compiler/debug/dwarf/headers.h | 2
-rw-r--r--  compiler/debug/elf_debug_writer.cc | 2
-rw-r--r--  compiler/debug/elf_debug_writer.h | 2
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc | 2
-rw-r--r--  compiler/dex/quick_compiler_callbacks.h | 15
-rw-r--r--  compiler/dex/verified_method.cc | 4
-rw-r--r--  compiler/driver/compiled_method_storage.h | 2
-rw-r--r--  compiler/driver/compiler_driver.cc | 4
-rw-r--r--  compiler/driver/compiler_driver.h | 2
-rw-r--r--  compiler/elf_builder.h | 2
-rw-r--r--  compiler/elf_writer.h | 2
-rw-r--r--  compiler/image_writer.cc | 14
-rw-r--r--  compiler/jni/quick/calling_convention.h | 2
-rw-r--r--  compiler/linker/arm64/relative_patcher_arm64.h | 2
-rw-r--r--  compiler/linker/relative_patcher.h | 2
-rw-r--r--  compiler/linker/relative_patcher_test.h | 2
-rw-r--r--  compiler/oat_test.cc | 2
-rw-r--r--  compiler/oat_writer.cc | 19
-rw-r--r--  compiler/oat_writer.h | 2
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc | 13
-rw-r--r--  compiler/optimizing/code_generator.cc | 106
-rw-r--r--  compiler/optimizing/code_generator.h | 15
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 297
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 20
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 103
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 6
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 335
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 39
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 38
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 16
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 268
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 17
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 292
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 18
-rw-r--r--  compiler/optimizing/codegen_test.cc | 10
-rw-r--r--  compiler/optimizing/common_arm64.h | 6
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc | 2
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc | 8
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc | 31
-rw-r--r--  compiler/optimizing/induction_var_range.cc | 234
-rw-r--r--  compiler/optimizing/induction_var_range.h | 64
-rw-r--r--  compiler/optimizing/induction_var_range_test.cc | 134
-rw-r--r--  compiler/optimizing/instruction_simplifier.h | 6
-rw-r--r--  compiler/optimizing/instruction_simplifier_shared.cc | 3
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc | 2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 2
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 6
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 2
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 2
-rw-r--r--  compiler/optimizing/locations.cc | 4
-rw-r--r--  compiler/optimizing/locations.h | 10
-rw-r--r--  compiler/optimizing/nodes.cc | 107
-rw-r--r--  compiler/optimizing/nodes.h | 101
-rw-r--r--  compiler/optimizing/nodes_mips.h | 35
-rw-r--r--  compiler/optimizing/nodes_shared.h | 12
-rw-r--r--  compiler/optimizing/pc_relative_fixups_mips.cc | 19
-rw-r--r--  compiler/optimizing/register_allocation_resolver.h | 2
-rw-r--r--  compiler/optimizing/sharpening.cc | 38
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc | 110
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h | 6
-rw-r--r--  compiler/utils/arm/assembler_arm.h | 5
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc | 87
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h | 13
-rw-r--r--  compiler/utils/arm/jni_macro_assembler_arm_vixl.cc | 18
-rw-r--r--  compiler/utils/array_ref.h | 200
-rw-r--r--  compiler/utils/assembler.h | 2
-rw-r--r--  compiler/utils/dedupe_set_test.cc | 2
-rw-r--r--  compiler/utils/intrusive_forward_list.h | 2
-rw-r--r--  compiler/utils/jni_macro_assembler.h | 2
-rw-r--r--  compiler/utils/mips/assembler_mips.cc | 238
-rw-r--r--  compiler/utils/mips/assembler_mips.h | 148
-rw-r--r--  compiler/utils/mips/assembler_mips32r6_test.cc | 43
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc | 117
-rw-r--r--  compiler/utils/swap_space.h | 6
-rw-r--r--  compiler/utils/transform_array_ref.h | 196
-rw-r--r--  compiler/utils/transform_array_ref_test.cc | 207
-rw-r--r--  compiler/utils/transform_iterator.h | 178
-rw-r--r--  compiler/utils/transform_iterator_test.cc | 531
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 2
-rw-r--r--  compiler/utils/x86/jni_macro_assembler_x86.h | 2
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 2
-rw-r--r--  compiler/utils/x86_64/jni_macro_assembler_x86_64.h | 2
85 files changed, 2216 insertions, 2566 deletions
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 969747505b..09c53b6889 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -280,3 +280,153 @@ art_cc_library {
},
shared_libs: ["libartd"],
}
+
+art_cc_library {
+ name: "libart-compiler-gtest",
+ defaults: ["libart-gtest-defaults"],
+ srcs: ["common_compiler_test.cc"],
+ shared_libs: [
+ "libartd-compiler",
+ "libart-runtime-gtest",
+ ],
+}
+
+art_cc_test {
+ name: "art_compiler_tests",
+ defaults: [
+ "art_gtest_defaults",
+ ],
+ srcs: [
+ "compiled_method_test.cc",
+ "debug/dwarf/dwarf_test.cc",
+ "driver/compiled_method_storage_test.cc",
+ "driver/compiler_driver_test.cc",
+ "elf_writer_test.cc",
+ "exception_test.cc",
+ "image_test.cc",
+ "jni/jni_compiler_test.cc",
+ "linker/multi_oat_relative_patcher_test.cc",
+ "linker/output_stream_test.cc",
+ "oat_test.cc",
+ "optimizing/bounds_check_elimination_test.cc",
+ "optimizing/dominator_test.cc",
+ "optimizing/find_loops_test.cc",
+ "optimizing/graph_checker_test.cc",
+ "optimizing/graph_test.cc",
+ "optimizing/gvn_test.cc",
+ "optimizing/induction_var_analysis_test.cc",
+ "optimizing/induction_var_range_test.cc",
+ "optimizing/licm_test.cc",
+ "optimizing/live_interval_test.cc",
+ "optimizing/nodes_test.cc",
+ "optimizing/parallel_move_test.cc",
+ "optimizing/pretty_printer_test.cc",
+ "optimizing/reference_type_propagation_test.cc",
+ "optimizing/side_effects_test.cc",
+ "optimizing/ssa_test.cc",
+ "optimizing/stack_map_test.cc",
+ "optimizing/suspend_check_test.cc",
+ "utils/dedupe_set_test.cc",
+ "utils/intrusive_forward_list_test.cc",
+ "utils/string_reference_test.cc",
+ "utils/swap_space_test.cc",
+ "utils/test_dex_file_builder_test.cc",
+
+ "jni/jni_cfi_test.cc",
+ "optimizing/codegen_test.cc",
+ "optimizing/optimizing_cfi_test.cc",
+ ],
+
+ codegen: {
+ arm: {
+ srcs: [
+ "linker/arm/relative_patcher_thumb2_test.cc",
+ "utils/arm/managed_register_arm_test.cc",
+ ],
+ },
+ arm64: {
+ srcs: [
+ "linker/arm64/relative_patcher_arm64_test.cc",
+ "utils/arm64/managed_register_arm64_test.cc",
+ ],
+ },
+ mips: {
+ srcs: [
+ "linker/mips/relative_patcher_mips_test.cc",
+ "linker/mips/relative_patcher_mips32r6_test.cc",
+ ],
+ },
+ x86: {
+ srcs: [
+ "linker/x86/relative_patcher_x86_test.cc",
+ "utils/x86/managed_register_x86_test.cc",
+
+ // These tests are testing architecture-independent
+ // functionality, but happen to use x86 codegen as part of the
+ // test.
+ "optimizing/constant_folding_test.cc",
+ "optimizing/dead_code_elimination_test.cc",
+ "optimizing/linearize_test.cc",
+ "optimizing/live_ranges_test.cc",
+ "optimizing/liveness_test.cc",
+ "optimizing/register_allocator_test.cc",
+ ],
+ },
+ x86_64: {
+ srcs: [
+ "linker/x86_64/relative_patcher_x86_64_test.cc",
+ ],
+ },
+ },
+
+ shared_libs: [
+ "libartd-compiler",
+ "libvixld-arm",
+ "libvixld-arm64",
+
+ "libbacktrace",
+ "libnativeloader",
+ ],
+}
+
+art_cc_test {
+ name: "art_compiler_host_tests",
+ device_supported: false,
+ defaults: [
+ "art_gtest_defaults",
+ ],
+ codegen: {
+ arm: {
+ srcs: [
+ "utils/arm/assembler_thumb2_test.cc",
+ "utils/assembler_thumb_test.cc",
+ ],
+ },
+ mips: {
+ srcs: [
+ "utils/mips/assembler_mips_test.cc",
+ "utils/mips/assembler_mips32r6_test.cc",
+ ],
+ },
+ mips64: {
+ srcs: [
+ "utils/mips64/assembler_mips64_test.cc",
+ ],
+ },
+ x86: {
+ srcs: [
+ "utils/x86/assembler_x86_test.cc",
+ ],
+ },
+ x86_64: {
+ srcs: [
+ "utils/x86_64/assembler_x86_64_test.cc",
+ ],
+ },
+ },
+ shared_libs: [
+ "libartd-compiler",
+ "libvixld-arm",
+ "libvixld-arm64",
+ ],
+}
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 2a81804f64..1a87448e80 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -23,10 +23,10 @@
#include <vector>
#include "arch/instruction_set.h"
+#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/length_prefixed_array.h"
#include "method_reference.h"
-#include "utils/array_ref.h"
namespace art {
diff --git a/compiler/compiler.h b/compiler/compiler.h
index ed42958a76..9a69456b5a 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -25,10 +25,14 @@ namespace art {
namespace jit {
class JitCodeCache;
}
+namespace mirror {
+ class DexCache;
+}
class ArtMethod;
class CompilerDriver;
class CompiledMethod;
+template<class T> class Handle;
class OatWriter;
class Compiler {
diff --git a/compiler/debug/dwarf/headers.h b/compiler/debug/dwarf/headers.h
index 146d9fddf5..28f108423e 100644
--- a/compiler/debug/dwarf/headers.h
+++ b/compiler/debug/dwarf/headers.h
@@ -19,13 +19,13 @@
#include <cstdint>
+#include "base/array_ref.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
#include "debug/dwarf/debug_info_entry_writer.h"
#include "debug/dwarf/debug_line_opcode_writer.h"
#include "debug/dwarf/dwarf_constants.h"
#include "debug/dwarf/register.h"
#include "debug/dwarf/writer.h"
-#include "utils/array_ref.h"
namespace art {
namespace dwarf {
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 5bfdd16083..d1c10a9246 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -18,6 +18,7 @@
#include <vector>
+#include "base/array_ref.h"
#include "debug/dwarf/dwarf_constants.h"
#include "debug/elf_compilation_unit.h"
#include "debug/elf_debug_frame_writer.h"
@@ -29,7 +30,6 @@
#include "debug/method_debug_info.h"
#include "elf_builder.h"
#include "linker/vector_output_stream.h"
-#include "utils/array_ref.h"
namespace art {
namespace debug {
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index b0542c7ac6..07f7229827 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -19,11 +19,11 @@
#include <vector>
+#include "base/array_ref.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "debug/dwarf/dwarf_constants.h"
#include "elf_builder.h"
-#include "utils/array_ref.h"
namespace art {
class OatHeader;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 3ce786e008..c902d289e9 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -90,7 +90,7 @@ class DexCompiler {
// Compiles a virtual method invocation into a quick virtual method invocation.
// The method index is replaced by the vtable index where the corresponding
- // AbstractMethod can be found. Therefore, this does not involve any resolution
+ // Executable can be found. Therefore, this does not involve any resolution
// at runtime.
// Since the method index is encoded with 16 bits, we can replace it only if the
// vtable index can be encoded with 16 bits too.
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 1f696863b6..824194c7bd 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -29,8 +29,10 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
QuickCompilerCallbacks(VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map,
CompilerCallbacks::CallbackMode mode)
- : CompilerCallbacks(mode), verification_results_(verification_results),
- method_inliner_map_(method_inliner_map) {
+ : CompilerCallbacks(mode),
+ verification_results_(verification_results),
+ method_inliner_map_(method_inliner_map),
+ verifier_deps_(nullptr) {
CHECK(verification_results != nullptr);
CHECK(method_inliner_map != nullptr);
}
@@ -47,9 +49,18 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
return true;
}
+ verifier::VerifierDeps* GetVerifierDeps() const OVERRIDE {
+ return verifier_deps_;
+ }
+
+ void SetVerifierDeps(verifier::VerifierDeps* deps) {
+ verifier_deps_ = deps;
+ }
+
private:
VerificationResults* const verification_results_;
DexFileToMethodInlinerMap* const method_inliner_map_;
+ verifier::VerifierDeps* verifier_deps_;
};
} // namespace art
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 4bcd59ac90..e19fb7b300 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -231,7 +231,7 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi
inst->VRegA_21c()));
const verifier::RegType& cast_type =
method_verifier->ResolveCheckedClass(inst->VRegB_21c());
- is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type);
+ is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type, method_verifier);
} else {
const verifier::RegType& array_type(line->GetRegisterType(method_verifier,
inst->VRegB_23x()));
@@ -243,7 +243,7 @@ void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifi
inst->VRegA_23x()));
const verifier::RegType& component_type = method_verifier->GetRegTypeCache()
->GetComponentType(array_type, method_verifier->GetClassLoader());
- is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type);
+ is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type, method_verifier);
}
}
if (is_safe_cast) {
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
index 8674abf815..124b5a6e25 100644
--- a/compiler/driver/compiled_method_storage.h
+++ b/compiler/driver/compiled_method_storage.h
@@ -20,9 +20,9 @@
#include <iosfwd>
#include <memory>
+#include "base/array_ref.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
-#include "utils/array_ref.h"
#include "utils/dedupe_set.h"
#include "utils/swap_space.h"
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 53e068edf2..a149c07beb 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -26,6 +26,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/array_ref.h"
#include "base/bit_vector.h"
#include "base/enums.h"
#include "base/stl_util.h"
@@ -67,7 +68,6 @@
#include "thread_pool.h"
#include "trampolines/trampoline_compiler.h"
#include "transaction.h"
-#include "utils/array_ref.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "utils/swap_space.h"
#include "verifier/method_verifier.h"
@@ -2474,7 +2474,7 @@ class InitializeClassVisitor : public CompilationVisitor {
// mode which prevents the GC from visiting objects modified during the transaction.
// Ensure GC is not run so don't access freed objects when aborting transaction.
- ScopedAssertNoThreadSuspension ants(soa.Self(), "Transaction end");
+ ScopedAssertNoThreadSuspension ants("Transaction end");
runtime->ExitTransactionMode();
if (!success) {
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index fbc1edd0ea..ee21efa854 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -24,6 +24,7 @@
#include "arch/instruction_set.h"
#include "base/arena_allocator.h"
+#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
@@ -39,7 +40,6 @@
#include "runtime.h"
#include "safe_map.h"
#include "thread_pool.h"
-#include "utils/array_ref.h"
#include "utils/dex_cache_arrays_layout.h"
namespace art {
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 7f2e1931d0..02831c9dc7 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -21,13 +21,13 @@
#include "arch/instruction_set.h"
#include "arch/mips/instruction_set_features_mips.h"
+#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/unix_file/fd_file.h"
#include "elf_utils.h"
#include "leb128.h"
#include "linker/error_delaying_output_stream.h"
-#include "utils/array_ref.h"
namespace art {
diff --git a/compiler/elf_writer.h b/compiler/elf_writer.h
index c9ea0083d5..f8f91029d4 100644
--- a/compiler/elf_writer.h
+++ b/compiler/elf_writer.h
@@ -22,10 +22,10 @@
#include <string>
#include <vector>
+#include "base/array_ref.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "os.h"
-#include "utils/array_ref.h"
namespace art {
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 7634510457..cdb57a98ad 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -48,12 +48,12 @@
#include "intern_table.h"
#include "linear_alloc.h"
#include "lock_word.h"
-#include "mirror/abstract_method.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/dex_cache-inl.h"
+#include "mirror/executable.h"
#include "mirror/method.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -868,7 +868,7 @@ void ImageWriter::PruneNonImageClasses() {
// Clear references to removed classes from the DexCaches.
ArtMethod* resolution_method = runtime->GetResolutionMethod();
- ScopedAssertNoThreadSuspension sa(self, __FUNCTION__);
+ ScopedAssertNoThreadSuspension sa(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); // For ClassInClassTable
ReaderMutexLock mu2(self, *class_linker->DexLock());
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
@@ -1989,14 +1989,10 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
} else {
if (klass == mirror::Method::StaticClass() || klass == mirror::Constructor::StaticClass()) {
// Need to go update the ArtMethod.
- auto* dest = down_cast<mirror::AbstractMethod*>(copy);
- auto* src = down_cast<mirror::AbstractMethod*>(orig);
+ auto* dest = down_cast<mirror::Executable*>(copy);
+ auto* src = down_cast<mirror::Executable*>(orig);
ArtMethod* src_method = src->GetArtMethod();
- auto it = native_object_relocations_.find(src_method);
- CHECK(it != native_object_relocations_.end())
- << "Missing relocation for AbstractMethod.artMethod " << PrettyMethod(src_method);
- dest->SetArtMethod(
- reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset));
+ dest->SetArtMethod(GetImageMethodAddress(src_method));
} else if (!klass->IsArrayClass()) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
if (klass == class_linker->GetClassRoot(ClassLinker::kJavaLangDexCache)) {
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 3d89146250..f541d8fa19 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,11 +18,11 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "base/enums.h"
#include "handle_scope.h"
#include "primitive.h"
#include "thread.h"
-#include "utils/array_ref.h"
#include "utils/managed_register.h"
namespace art {
diff --git a/compiler/linker/arm64/relative_patcher_arm64.h b/compiler/linker/arm64/relative_patcher_arm64.h
index 48ad1059b0..a4a80185dc 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.h
+++ b/compiler/linker/arm64/relative_patcher_arm64.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_LINKER_ARM64_RELATIVE_PATCHER_ARM64_H_
#define ART_COMPILER_LINKER_ARM64_RELATIVE_PATCHER_ARM64_H_
+#include "base/array_ref.h"
#include "linker/arm/relative_patcher_arm_base.h"
-#include "utils/array_ref.h"
namespace art {
namespace linker {
diff --git a/compiler/linker/relative_patcher.h b/compiler/linker/relative_patcher.h
index a22b9f2c2d..15e955b2c6 100644
--- a/compiler/linker/relative_patcher.h
+++ b/compiler/linker/relative_patcher.h
@@ -21,9 +21,9 @@
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
+#include "base/array_ref.h"
#include "base/macros.h"
#include "method_reference.h"
-#include "utils/array_ref.h"
namespace art {
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index d21f33e46f..304b31ca84 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -19,6 +19,7 @@
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
+#include "base/array_ref.h"
#include "base/macros.h"
#include "compiled_method.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
@@ -31,7 +32,6 @@
#include "method_reference.h"
#include "oat.h"
#include "oat_quick_method_header.h"
-#include "utils/array_ref.h"
#include "vector_output_stream.h"
namespace art {
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 78e9ca91b7..24d102d4c0 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -453,7 +453,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
EXPECT_EQ(72U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(20U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(164 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ EXPECT_EQ(163 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
sizeof(QuickEntryPoints));
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 43e01d54a6..d629c0c887 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -259,7 +259,16 @@ class OatWriter::OatDexFile {
// Data to write to a separate section.
dchecked_vector<uint32_t> class_offsets_;
+ void InitTypeLookupTable(const DexFile& dex_file, uint8_t* storage) const {
+ lookup_table_.reset(TypeLookupTable::Create(dex_file, storage));
+ }
+
+ TypeLookupTable* GetTypeLookupTable() const {
+ return lookup_table_.get();
+ }
+
private:
+ mutable std::unique_ptr<TypeLookupTable> lookup_table_;
size_t GetClassOffsetsRawSize() const {
return class_offsets_.size() * sizeof(class_offsets_[0]);
}
@@ -994,7 +1003,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
out_(out),
file_offset_(file_offset),
soa_(Thread::Current()),
- no_thread_suspension_(soa_.Self(), "OatWriter patching"),
+ no_thread_suspension_("OatWriter patching"),
class_linker_(Runtime::Current()->GetClassLinker()),
dex_cache_(nullptr) {
patched_code_.reserve(16 * KB);
@@ -1036,7 +1045,7 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
// No thread suspension, since dex_cache_ may get invalidated if that occurs.
- ScopedAssertNoThreadSuspension tsc(Thread::Current(), __FUNCTION__);
+ ScopedAssertNoThreadSuspension tsc(__FUNCTION__);
if (compiled_method != nullptr) { // ie. not an abstract method
size_t file_offset = file_offset_;
OutputStream* out = out_;
@@ -2285,9 +2294,9 @@ bool OatWriter::WriteTypeLookupTables(
}
// Create the lookup table. When `nullptr` is given as the storage buffer,
- // TypeLookupTable allocates its own and DexFile takes ownership.
- opened_dex_files[i]->CreateTypeLookupTable(/* storage */ nullptr);
- TypeLookupTable* table = opened_dex_files[i]->GetTypeLookupTable();
+ // TypeLookupTable allocates its own and OatDexFile takes ownership.
+ oat_dex_file->InitTypeLookupTable(*opened_dex_files[i], /* storage */ nullptr);
+ TypeLookupTable* table = oat_dex_file->GetTypeLookupTable();
// Type tables are required to be 4 byte aligned.
size_t initial_offset = oat_size_;
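For illustration only (not part of the patch): a condensed sketch of the ownership hand-off described in the comment above, assuming (as the comment states) that `TypeLookupTable::Create` heap-allocates its own table when `storage` is null. The class name is hypothetical; the members match the OatDexFile additions earlier in this diff.

    #include <memory>

    class OatDexFileSketch {  // Hypothetical stand-in for OatWriter::OatDexFile.
     public:
      void InitTypeLookupTable(const DexFile& dex_file, uint8_t* storage) const {
        // With storage == nullptr, Create() allocates its own buffer; reset()
        // makes this OatDexFile (not the DexFile) the owner.
        lookup_table_.reset(TypeLookupTable::Create(dex_file, storage));
      }
      TypeLookupTable* GetTypeLookupTable() const { return lookup_table_.get(); }
     private:
      mutable std::unique_ptr<TypeLookupTable> lookup_table_;
    };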
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 77525f1a32..dd7d699eee 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -21,6 +21,7 @@
#include <cstddef>
#include <memory>
+#include "base/array_ref.h"
#include "base/dchecked_vector.h"
#include "linker/relative_patcher.h" // For linker::RelativePatcherTargetProvider.
#include "mem_map.h"
@@ -29,7 +30,6 @@
#include "oat.h"
#include "os.h"
#include "safe_map.h"
-#include "utils/array_ref.h"
namespace art {
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 8aefd9ea1f..994d394a2a 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -887,7 +887,7 @@ class BCEVisitor : public HGraphVisitor {
bool needs_finite_test = false;
bool needs_taken_test = false;
if (DynamicBCESeemsProfitable(loop, bounds_check->GetBlock()) &&
- induction_range_.CanGenerateCode(
+ induction_range_.CanGenerateRange(
bounds_check, index, &needs_finite_test, &needs_taken_test) &&
CanHandleInfiniteLoop(loop, index, needs_finite_test) &&
// Do this test last, since it may generate code.
@@ -1403,10 +1403,10 @@ class BCEVisitor : public HGraphVisitor {
// whether code generation on the original and, thus, related bounds check was possible.
// It handles either loop invariants (lower is not set) or unit strides.
if (other_c == max_c) {
- induction_range_.GenerateRangeCode(
+ induction_range_.GenerateRange(
other_bounds_check, other_index, GetGraph(), block, &max_lower, &max_upper);
} else if (other_c == min_c && base != nullptr) {
- induction_range_.GenerateRangeCode(
+ induction_range_.GenerateRange(
other_bounds_check, other_index, GetGraph(), block, &min_lower, &min_upper);
}
ReplaceInstruction(other_bounds_check, other_index);
@@ -1699,11 +1699,8 @@ class BCEVisitor : public HGraphVisitor {
// Insert the taken-test to see if the loop body is entered. If the
// loop isn't entered at all, it jumps around the deoptimization block.
if_block->AddInstruction(new (GetGraph()->GetArena()) HGoto()); // placeholder
- HInstruction* condition = nullptr;
- induction_range_.GenerateTakenTest(header->GetLastInstruction(),
- GetGraph(),
- if_block,
- &condition);
+ HInstruction* condition = induction_range_.GenerateTakenTest(
+ header->GetLastInstruction(), GetGraph(), if_block);
DCHECK(condition != nullptr);
if_block->RemoveInstruction(if_block->GetLastInstruction());
if_block->AddInstruction(new (GetGraph()->GetArena()) HIf(condition));
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 1444931b9c..cf633df496 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1090,13 +1090,6 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
}
}
-bool CodeGenerator::IsImplicitNullCheckAllowed(HNullCheck* null_check) const {
- return compiler_options_.GetImplicitNullChecks() &&
- // Null checks which might throw into a catch block need to save live
- // registers and therefore cannot be done implicitly.
- !null_check->CanThrowIntoCatchBlock();
-}
-
bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
HInstruction* first_next_not_move = null_check->GetNextDisregardingMoves();
@@ -1105,6 +1098,10 @@ bool CodeGenerator::CanMoveNullCheckToUser(HNullCheck* null_check) {
}
void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
+ if (!compiler_options_.GetImplicitNullChecks()) {
+ return;
+ }
+
// If we are from a static path don't record the pc as we can't throw NPE.
// NB: having the checks here makes the code much less verbose in the arch
// specific code generators.
@@ -1123,16 +1120,35 @@ void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
// and needs to record the pc.
if (first_prev_not_move != nullptr && first_prev_not_move->IsNullCheck()) {
HNullCheck* null_check = first_prev_not_move->AsNullCheck();
- if (IsImplicitNullCheckAllowed(null_check)) {
- // TODO: The parallel moves modify the environment. Their changes need to be
- // reverted otherwise the stack maps at the throw point will not be correct.
- RecordPcInfo(null_check, null_check->GetDexPc());
- }
+ // TODO: The parallel moves modify the environment. Their changes need to be
+ // reverted otherwise the stack maps at the throw point will not be correct.
+ RecordPcInfo(null_check, null_check->GetDexPc());
+ }
+}
+
+LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* instruction,
+ RegisterSet caller_saves) {
+ // Note: Using kNoCall allows the method to be treated as leaf (and eliminates the
+ // HSuspendCheck from the entry block). However, it will still get a valid stack frame
+ // because the HNullCheck needs an environment.
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ // When throwing from a try block, we may need to retrieve dalvik registers from
+ // physical registers and we also need to set up stack mask for GC. This is
+ // implicitly achieved by passing kCallOnSlowPath to the LocationSummary.
+ bool can_throw_into_catch_block = instruction->CanThrowIntoCatchBlock();
+ if (can_throw_into_catch_block) {
+ call_kind = LocationSummary::kCallOnSlowPath;
}
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ if (can_throw_into_catch_block && compiler_options_.GetImplicitNullChecks()) {
+ locations->SetCustomSlowPathCallerSaves(caller_saves); // Default: no caller-save registers.
+ }
+ DCHECK(!instruction->HasUses());
+ return locations;
}
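For illustration only (not part of the patch): with this helper, a backend's locations builder for a throwing instruction reduces to one call plus its inputs, exactly as the ARM hunks later in this diff do for HNullCheck:

    void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
      LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
      locations->SetInAt(0, Location::RequiresRegister());
    }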
void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
- if (IsImplicitNullCheckAllowed(instruction)) {
+ if (compiler_options_.GetImplicitNullChecks()) {
MaybeRecordStat(kImplicitNullCheckGenerated);
GenerateImplicitNullCheck(instruction);
} else {
@@ -1172,37 +1188,51 @@ void CodeGenerator::EmitParallelMoves(Location from1,
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
-void CodeGenerator::ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path) {
+void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ SlowPathCode* slow_path) {
// Ensure that the call kind indication given to the register allocator is
- // coherent with the runtime call generated, and that the GC side effect is
- // set when required.
+ // coherent with the runtime call generated.
if (slow_path == nullptr) {
DCHECK(instruction->GetLocations()->WillCall())
<< "instruction->DebugName()=" << instruction->DebugName();
- DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
- << "instruction->DebugName()=" << instruction->DebugName()
- << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString();
} else {
DCHECK(instruction->GetLocations()->CallsOnSlowPath() || slow_path->IsFatal())
<< "instruction->DebugName()=" << instruction->DebugName()
<< " slow_path->GetDescription()=" << slow_path->GetDescription();
- DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
- // When (non-Baker) read barriers are enabled, some instructions
- // use a slow path to emit a read barrier, which does not trigger
- // GC.
- (kEmitCompilerReadBarrier &&
- !kUseBakerReadBarrier &&
- (instruction->IsInstanceFieldGet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsArrayGet() ||
- instruction->IsLoadClass() ||
- instruction->IsLoadString() ||
- instruction->IsInstanceOf() ||
- instruction->IsCheckCast() ||
- (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
- << "instruction->DebugName()=" << instruction->DebugName()
- << " instruction->GetSideEffects().ToString()=" << instruction->GetSideEffects().ToString()
- << " slow_path->GetDescription()=" << slow_path->GetDescription();
+ }
+
+ // Check that the GC side effect is set when required.
+ // TODO: Reverse EntrypointCanTriggerGC
+ if (EntrypointCanTriggerGC(entrypoint)) {
+ if (slow_path == nullptr) {
+ DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()))
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " instruction->GetSideEffects().ToString()="
+ << instruction->GetSideEffects().ToString();
+ } else {
+ DCHECK(instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()) ||
+ // When (non-Baker) read barriers are enabled, some instructions
+ // use a slow path to emit a read barrier, which does not trigger
+ // GC.
+ (kEmitCompilerReadBarrier &&
+ !kUseBakerReadBarrier &&
+ (instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsArrayGet() ||
+ instruction->IsLoadClass() ||
+ instruction->IsLoadString() ||
+ instruction->IsInstanceOf() ||
+ instruction->IsCheckCast() ||
+ (instruction->IsInvokeVirtual() && instruction->GetLocations()->Intrinsified()))))
+ << "instruction->DebugName()=" << instruction->DebugName()
+ << " instruction->GetSideEffects().ToString()="
+ << instruction->GetSideEffects().ToString()
+ << " slow_path->GetDescription()=" << slow_path->GetDescription();
+ }
+ } else {
+ // The GC side effect is not required for the instruction. But the instruction might still have
+ // it, for example if it calls other entrypoints requiring it.
}
// Check the coherency of leaf information.
@@ -1252,7 +1282,7 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo
}
const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
- for (size_t i : LowToHighBits(fp_spills)) {
+ for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
saved_fpu_stack_offsets_[i] = stack_offset;
@@ -1271,7 +1301,7 @@ void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary*
}
const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
- for (size_t i : LowToHighBits(fp_spills)) {
+ for (uint32_t i : LowToHighBits(fp_spills)) {
DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, i);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 0c60a98139..c0c798d862 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -313,6 +313,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
bool CanMoveNullCheckToUser(HNullCheck* null_check);
void MaybeRecordImplicitNullCheck(HInstruction* instruction);
+ LocationSummary* CreateThrowingSlowPathLocations(
+ HInstruction* instruction, RegisterSet caller_saves = RegisterSet::Empty());
void GenerateNullCheck(HNullCheck* null_check);
virtual void GenerateImplicitNullCheck(HNullCheck* null_check) = 0;
virtual void GenerateExplicitNullCheck(HNullCheck* null_check) = 0;
@@ -322,12 +324,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// TODO: Replace with a catch-entering instruction that records the environment.
void RecordCatchBlockInfo();
- // Returns true if implicit null checks are allowed in the compiler options
- // and if the null check is not inside a try block. We currently cannot do
- // implicit null checks in that case because we need the NullCheckSlowPath to
- // save live registers, which may be needed by the runtime to set catch phis.
- bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;
-
// TODO: Avoid creating the `std::unique_ptr` here.
void AddSlowPath(SlowPathCode* slow_path) {
slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
@@ -409,7 +405,9 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// Performs checks pertaining to an InvokeRuntime call.
- void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path);
+ void ValidateInvokeRuntime(QuickEntrypointEnum entrypoint,
+ HInstruction* instruction,
+ SlowPathCode* slow_path);
// Performs checks pertaining to an InvokeRuntimeWithoutRecordingPcInfo call.
static void ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* instruction,
@@ -582,6 +580,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
core_spill_mask_(0),
fpu_spill_mask_(0),
first_register_slot_in_slow_path_(0),
+ allocated_registers_(RegisterSet::Empty()),
blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
kArenaAllocCodeGenerator)),
blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
@@ -713,6 +712,8 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
bool is_leaf_;
// Whether an instruction in the graph accesses the current method.
+ // TODO: Rename: this actually indicates that some instruction in the method
+ // needs the environment including a valid stack frame.
bool requires_current_method_;
friend class OptimizingCFITest;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 3cc2598f8f..a052873afd 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -63,9 +63,188 @@ static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
#define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, x).Int32Value()
-class NullCheckSlowPathARM : public SlowPathCode {
+static constexpr int kRegListThreshold = 4;
+
+// SaveLiveRegisters and RestoreLiveRegisters from SlowPathCodeARM operate on sets of S registers;
+// for each live D register they treat the two corresponding S registers as live.
+//
+// The following two functions (SaveContiguousSRegisterList, RestoreContiguousSRegisterList) build,
+// from a list of contiguous S registers, a list of contiguous D registers (handling the first/last
+// S register corner cases) and save/restore this new list as D registers. The benefits are:
+// - decreasing code size;
+// - avoiding hazards on Cortex-A57, when a pair of S registers for an actual live D register is
+//   restored and then used in regular non-SlowPath code as a D register.
+//
+// For the following example (v means the S register is live):
+// D names: | D0 | D1 | D2 | D4 | ...
+// S names: | S0 | S1 | S2 | S3 | S4 | S5 | S6 | S7 | ...
+// Live? | | v | v | v | v | v | v | | ...
+//
+// S1 and S6 will be saved/restored independently; D registers list (D1, D2) will be processed
+// as D registers.
+static size_t SaveContiguousSRegisterList(size_t first,
+ size_t last,
+ CodeGenerator* codegen,
+ size_t stack_offset) {
+ DCHECK_LE(first, last);
+ if ((first == last) && (first == 0)) {
+ stack_offset += codegen->SaveFloatingPointRegister(stack_offset, first);
+ return stack_offset;
+ }
+ if (first % 2 == 1) {
+ stack_offset += codegen->SaveFloatingPointRegister(stack_offset, first++);
+ }
+
+ bool save_last = false;
+ if (last % 2 == 0) {
+ save_last = true;
+ --last;
+ }
+
+ if (first < last) {
+ DRegister d_reg = static_cast<DRegister>(first / 2);
+ DCHECK_EQ((last - first + 1) % 2, 0u);
+ size_t number_of_d_regs = (last - first + 1) / 2;
+
+ if (number_of_d_regs == 1) {
+ __ StoreDToOffset(d_reg, SP, stack_offset);
+ } else if (number_of_d_regs > 1) {
+ __ add(IP, SP, ShifterOperand(stack_offset));
+ __ vstmiad(IP, d_reg, number_of_d_regs);
+ }
+ stack_offset += number_of_d_regs * kArmWordSize * 2;
+ }
+
+ if (save_last) {
+ stack_offset += codegen->SaveFloatingPointRegister(stack_offset, last + 1);
+ }
+
+ return stack_offset;
+}
+
+static size_t RestoreContiguousSRegisterList(size_t first,
+ size_t last,
+ CodeGenerator* codegen,
+ size_t stack_offset) {
+ DCHECK_LE(first, last);
+ if ((first == last) && (first == 0)) {
+ stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, first);
+ return stack_offset;
+ }
+ if (first % 2 == 1) {
+ stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, first++);
+ }
+
+ bool restore_last = false;
+ if (last % 2 == 0) {
+ restore_last = true;
+ --last;
+ }
+
+ if (first < last) {
+ DRegister d_reg = static_cast<DRegister>(first / 2);
+ DCHECK_EQ((last - first + 1) % 2, 0u);
+ size_t number_of_d_regs = (last - first + 1) / 2;
+ if (number_of_d_regs == 1) {
+ __ LoadDFromOffset(d_reg, SP, stack_offset);
+ } else if (number_of_d_regs > 1) {
+ __ add(IP, SP, ShifterOperand(stack_offset));
+ __ vldmiad(IP, d_reg, number_of_d_regs);
+ }
+ stack_offset += number_of_d_regs * kArmWordSize * 2;
+ }
+
+ if (restore_last) {
+ stack_offset += codegen->RestoreFloatingPointRegister(stack_offset, last + 1);
+ }
+
+ return stack_offset;
+}
+
+void SlowPathCodeARM::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+ size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+ size_t orig_offset = stack_offset;
+
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ for (uint32_t i : LowToHighBits(core_spills)) {
+ // If the register holds an object, update the stack mask.
+ if (locations->RegisterContainsObject(i)) {
+ locations->SetStackBit(stack_offset / kVRegSize);
+ }
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_core_stack_offsets_[i] = stack_offset;
+ stack_offset += kArmWordSize;
+ }
+
+ int reg_num = POPCOUNT(core_spills);
+ if (reg_num != 0) {
+ if (reg_num > kRegListThreshold) {
+ __ StoreList(RegList(core_spills), orig_offset);
+ } else {
+ stack_offset = orig_offset;
+ for (uint32_t i : LowToHighBits(core_spills)) {
+ stack_offset += codegen->SaveCoreRegister(stack_offset, i);
+ }
+ }
+ }
+
+ uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ orig_offset = stack_offset;
+ for (uint32_t i : LowToHighBits(fp_spills)) {
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_fpu_stack_offsets_[i] = stack_offset;
+ stack_offset += kArmWordSize;
+ }
+
+ stack_offset = orig_offset;
+ while (fp_spills != 0u) {
+ uint32_t begin = CTZ(fp_spills);
+ uint32_t tmp = fp_spills + (1u << begin);
+ fp_spills &= tmp; // Clear the contiguous range of 1s.
+ uint32_t end = (tmp == 0u) ? 32u : CTZ(tmp); // CTZ(0) is undefined.
+ stack_offset = SaveContiguousSRegisterList(begin, end - 1, codegen, stack_offset);
+ }
+ DCHECK_LE(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+}
+
+void SlowPathCodeARM::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+ size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+ size_t orig_offset = stack_offset;
+
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ for (uint32_t i : LowToHighBits(core_spills)) {
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ stack_offset += kArmWordSize;
+ }
+
+ int reg_num = POPCOUNT(core_spills);
+ if (reg_num != 0) {
+ if (reg_num > kRegListThreshold) {
+ __ LoadList(RegList(core_spills), orig_offset);
+ } else {
+ stack_offset = orig_offset;
+ for (uint32_t i : LowToHighBits(core_spills)) {
+ stack_offset += codegen->RestoreCoreRegister(stack_offset, i);
+ }
+ }
+ }
+
+ uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ while (fp_spills != 0u) {
+ uint32_t begin = CTZ(fp_spills);
+ uint32_t tmp = fp_spills + (1u << begin);
+ fp_spills &= tmp; // Clear the contiguous range of 1s.
+ uint32_t end = (tmp == 0u) ? 32u : CTZ(tmp); // CTZ(0) is undefined.
+ stack_offset = RestoreContiguousSRegisterList(begin, end - 1, codegen, stack_offset);
+ }
+ DCHECK_LE(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+}
+
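For illustration only (not part of the patch): a standalone, runnable sketch of the spill-mask handling above. It extracts each contiguous run of live S registers with the `mask + (1u << begin)` carry trick, then mirrors SaveContiguousSRegisterList's first/last parity handling to show which registers are stored singly and which become a D-register block. `__builtin_ctz` stands in for ART's `CTZ`; the mask reproduces the S1..S6 example from the comment above.

    #include <cstdint>
    #include <cstdio>

    // Mirrors the first/last parity handling of SaveContiguousSRegisterList.
    static void DescribeRun(uint32_t first, uint32_t last) {
      if (first == last && first == 0) {  // Lone S0, as in the real code's guard.
        std::printf("save S0 alone\n");
        return;
      }
      if (first % 2 == 1) std::printf("save S%u alone\n", first++);  // Unaligned head.
      bool save_last = false;
      if (last % 2 == 0) { save_last = true; --last; }               // Unaligned tail.
      if (first < last) {
        std::printf("vstm D%u..D%u\n", first / 2, last / 2);  // Aligned pairs as D regs.
      }
      if (save_last) std::printf("save S%u alone\n", last + 1);
    }

    int main() {
      uint32_t fp_spills = 0x7Eu;  // S1..S6 live, as in the comment's example.
      while (fp_spills != 0u) {
        uint32_t begin = __builtin_ctz(fp_spills);  // First bit of the run.
        uint32_t tmp = fp_spills + (1u << begin);   // The carry clears the whole 1-run...
        fp_spills &= tmp;                           // ...leaving only the higher runs.
        uint32_t end = (tmp == 0u) ? 32u : __builtin_ctz(tmp);  // CTZ(0) is undefined.
        DescribeRun(begin, end - 1);  // Prints: save S1 alone / vstm D1..D2 / save S6 alone.
      }
      return 0;
    }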
+class NullCheckSlowPathARM : public SlowPathCodeARM {
public:
- explicit NullCheckSlowPathARM(HNullCheck* instruction) : SlowPathCode(instruction) {}
+ explicit NullCheckSlowPathARM(HNullCheck* instruction) : SlowPathCodeARM(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -89,17 +268,13 @@ class NullCheckSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
};
-class DivZeroCheckSlowPathARM : public SlowPathCode {
+class DivZeroCheckSlowPathARM : public SlowPathCodeARM {
public:
- explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}
+ explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : SlowPathCodeARM(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
arm_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -112,10 +287,10 @@ class DivZeroCheckSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
};
-class SuspendCheckSlowPathARM : public SlowPathCode {
+class SuspendCheckSlowPathARM : public SlowPathCodeARM {
public:
SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
- : SlowPathCode(instruction), successor_(successor) {}
+ : SlowPathCodeARM(instruction), successor_(successor) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -150,10 +325,10 @@ class SuspendCheckSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM);
};
-class BoundsCheckSlowPathARM : public SlowPathCode {
+class BoundsCheckSlowPathARM : public SlowPathCodeARM {
public:
explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
- : SlowPathCode(instruction) {}
+ : SlowPathCodeARM(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -190,13 +365,13 @@ class BoundsCheckSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
-class LoadClassSlowPathARM : public SlowPathCode {
+class LoadClassSlowPathARM : public SlowPathCodeARM {
public:
LoadClassSlowPathARM(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
bool do_clinit)
- : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ : SlowPathCodeARM(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
@@ -247,10 +422,10 @@ class LoadClassSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
};
-class TypeCheckSlowPathARM : public SlowPathCode {
+class TypeCheckSlowPathARM : public SlowPathCodeARM {
public:
TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
- : SlowPathCode(instruction), is_fatal_(is_fatal) {}
+ : SlowPathCodeARM(instruction), is_fatal_(is_fatal) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
@@ -307,10 +482,10 @@ class TypeCheckSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
};
-class DeoptimizationSlowPathARM : public SlowPathCode {
+class DeoptimizationSlowPathARM : public SlowPathCodeARM {
public:
explicit DeoptimizationSlowPathARM(HDeoptimize* instruction)
- : SlowPathCode(instruction) {}
+ : SlowPathCodeARM(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -325,9 +500,9 @@ class DeoptimizationSlowPathARM : public SlowPathCode {
DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
};
-class ArraySetSlowPathARM : public SlowPathCode {
+class ArraySetSlowPathARM : public SlowPathCodeARM {
public:
- explicit ArraySetSlowPathARM(HInstruction* instruction) : SlowPathCode(instruction) {}
+ explicit ArraySetSlowPathARM(HInstruction* instruction) : SlowPathCodeARM(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
@@ -367,10 +542,10 @@ class ArraySetSlowPathARM : public SlowPathCode {
};
// Slow path marking an object during a read barrier.
-class ReadBarrierMarkSlowPathARM : public SlowPathCode {
+class ReadBarrierMarkSlowPathARM : public SlowPathCodeARM {
public:
ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location obj)
- : SlowPathCode(instruction), obj_(obj) {
+ : SlowPathCodeARM(instruction), obj_(obj) {
DCHECK(kEmitCompilerReadBarrier);
}
@@ -434,7 +609,7 @@ class ReadBarrierMarkSlowPathARM : public SlowPathCode {
};
// Slow path generating a read barrier for a heap reference.
-class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
+class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCodeARM {
public:
ReadBarrierForHeapReferenceSlowPathARM(HInstruction* instruction,
Location out,
@@ -442,7 +617,7 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
Location obj,
uint32_t offset,
Location index)
- : SlowPathCode(instruction),
+ : SlowPathCodeARM(instruction),
out_(out),
ref_(ref),
obj_(obj),
@@ -614,10 +789,10 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
};
// Slow path generating a read barrier for a GC root.
-class ReadBarrierForRootSlowPathARM : public SlowPathCode {
+class ReadBarrierForRootSlowPathARM : public SlowPathCodeARM {
public:
ReadBarrierForRootSlowPathARM(HInstruction* instruction, Location out, Location root)
- : SlowPathCode(instruction), out_(out), root_(root) {
+ : SlowPathCodeARM(instruction), out_(out), root_(root) {
DCHECK(kEmitCompilerReadBarrier);
}
@@ -1177,7 +1352,7 @@ void CodeGeneratorARM::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value());
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
@@ -1502,14 +1677,14 @@ void InstructionCodeGeneratorARM::VisitIf(HIf* if_instr) {
void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
}
void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
- SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize);
+ SlowPathCodeARM* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM>(deoptimize);
GenerateTestAndBranch(deoptimize,
/* condition_input_index */ 0,
slow_path->GetEntryLabel(),
@@ -3085,18 +3260,12 @@ void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
}
void LocationsBuilderARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorARM::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM(instruction);
codegen_->AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -3931,7 +4100,7 @@ void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldI
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
@@ -4251,14 +4420,8 @@ void InstructionCodeGeneratorARM::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderARM::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void CodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -4272,7 +4435,7 @@ void CodeGeneratorARM::GenerateImplicitNullCheck(HNullCheck* instruction) {
}
void CodeGeneratorARM::GenerateExplicitNullCheck(HNullCheck* instruction) {
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM(instruction);
AddSlowPath(slow_path);
LocationSummary* locations = instruction->GetLocations();
@@ -4410,7 +4573,7 @@ void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -4690,7 +4853,7 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
Label done;
- SlowPathCode* slow_path = nullptr;
+ SlowPathCodeARM* slow_path = nullptr;
if (may_need_runtime_call_for_type_check) {
slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM(instruction);
@@ -4895,20 +5058,18 @@ void InstructionCodeGeneratorARM::VisitIntermediateAddress(HIntermediateAddress*
}
void LocationsBuilderARM::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorARM::VisitBoundsCheck(HBoundsCheck* instruction) {
LocationSummary* locations = instruction->GetLocations();
- SlowPathCode* slow_path =
+ SlowPathCodeARM* slow_path =
new (GetGraph()->GetArena()) BoundsCheckSlowPathARM(instruction);
codegen_->AddSlowPath(slow_path);
@@ -4947,7 +5108,7 @@ void InstructionCodeGeneratorARM::VisitParallelMove(HParallelMove* instruction)
void LocationsBuilderARM::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorARM::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5269,7 +5430,7 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
@@ -5377,7 +5538,7 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
@@ -5402,7 +5563,7 @@ void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
// We assume the class is not null.
- SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
GenerateClassInitializationCheck(slow_path,
@@ -5410,7 +5571,7 @@ void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
}
void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
- SlowPathCode* slow_path, Register class_reg) {
+ SlowPathCodeARM* slow_path, Register class_reg) {
__ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
__ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
__ b(slow_path->GetEntryLabel(), LT);
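
The initialization check above relies on the numeric ordering of the class status enum: every status value below kStatusInitialized means the class may still need initialization. Restated as standalone C++, with the names reused from the snippet purely for illustration:

    // cmp IP, #kStatusInitialized; b slow_path, LT  is equivalent to:
    bool NeedsClassInitCheck(int32_t status, int32_t status_initialized) {
      return status < status_initialized;  // Take the slow path when not yet initialized.
    }
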
@@ -5573,7 +5734,7 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -5603,7 +5764,7 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
Label done, zero;
- SlowPathCode* slow_path = nullptr;
+ SlowPathCodeARM* slow_path = nullptr;
// Return 0 if `obj` is null.
// Avoid null check if we know obj is not null.
@@ -5795,7 +5956,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
!instruction->CanThrowIntoCatchBlock();
- SlowPathCode* type_check_slow_path =
+ SlowPathCodeARM* type_check_slow_path =
new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction,
is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
@@ -6289,7 +6450,7 @@ void InstructionCodeGeneratorARM::GenerateGcRootFieldLoad(HInstruction* instruct
"have different sizes.");
// Slow path marking the GC root `root`.
- SlowPathCode* slow_path =
+ SlowPathCodeARM* slow_path =
new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, root);
codegen_->AddSlowPath(slow_path);
@@ -6430,7 +6591,7 @@ void CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i
__ MaybeUnpoisonHeapReference(ref_reg);
// Slow path marking the object `ref` when it is gray.
- SlowPathCode* slow_path =
+ SlowPathCodeARM* slow_path =
new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, ref);
AddSlowPath(slow_path);
@@ -6466,7 +6627,7 @@ void CodeGeneratorARM::GenerateReadBarrierSlow(HInstruction* instruction,
// not used by the artReadBarrierSlow entry point.
//
// TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
- SlowPathCode* slow_path = new (GetGraph()->GetArena())
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena())
ReadBarrierForHeapReferenceSlowPathARM(instruction, out, ref, obj, offset, index);
AddSlowPath(slow_path);
@@ -6501,7 +6662,7 @@ void CodeGeneratorARM::GenerateReadBarrierForRootSlow(HInstruction* instruction,
//
// Note that GC roots are not affected by heap poisoning, so we do
// not need to do anything special for this here.
- SlowPathCode* slow_path =
+ SlowPathCodeARM* slow_path =
new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARM(instruction, out, root);
AddSlowPath(slow_path);
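
Every ARM slow path in this file is now created and stored as SlowPathCodeARM* instead of the generic SlowPathCode*, matching the new intermediate class added to the header below. One plausible payoff of declaring its register save/restore overrides FINAL is devirtualization; a minimal standalone sketch of that effect (simplified signatures, not ART's):

    struct SlowPathCode {
      virtual ~SlowPathCode() {}
      virtual void SaveLiveRegisters() = 0;
    };
    struct SlowPathCodeARM : SlowPathCode {
      void SaveLiveRegisters() final {}  // FINAL: no subclass can override further.
    };
    void Emit(SlowPathCodeARM* path) {
      // Through a SlowPathCodeARM*, the compiler may bind this call statically;
      // through a SlowPathCode*, it remains an ordinary virtual dispatch.
      path->SaveLiveRegisters();
    }
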
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index ce9d7e6056..424a1a1455 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -50,6 +50,18 @@ static constexpr SRegister kRuntimeParameterFpuRegisters[] = { S0, S1, S2, S3 };
static constexpr size_t kRuntimeParameterFpuRegistersLength =
arraysize(kRuntimeParameterFpuRegisters);
+class SlowPathCodeARM : public SlowPathCode {
+ public:
+ explicit SlowPathCodeARM(HInstruction* instruction) : SlowPathCode(instruction) {}
+
+ void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) FINAL;
+ void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) FINAL;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM);
+};
+
+
class InvokeRuntimeCallingConvention : public CallingConvention<Register, SRegister> {
public:
InvokeRuntimeCallingConvention()
@@ -216,7 +228,7 @@ class InstructionCodeGeneratorARM : public InstructionCodeGenerator {
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
- void GenerateClassInitializationCheck(SlowPathCode* slow_path, Register class_reg);
+ void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg);
void GenerateAndConst(Register out, Register first, uint32_t value);
void GenerateOrrConst(Register out, Register first, uint32_t value);
void GenerateEorConst(Register out, Register first, uint32_t value);
@@ -556,10 +568,10 @@ class CodeGeneratorARM : public CodeGenerator {
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
- void GenerateNop();
+ void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction);
- void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
private:
Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
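
The OVERRIDE annotations added above (ART's macro for the C++11 override specifier) guard against signature drift: without them, a change to the base CodeGenerator declaration would silently turn these methods into new, unrelated virtuals. A two-line illustration:

    struct Base { virtual void GenerateNop() {} };
    struct Derived : Base {
      void GenerateNop() override {}    // OK: matches the base declaration.
      // void GeneratNop() override {}  // Typo: rejected, overrides nothing.
    };
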
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 179bf76f5b..a29e9f3e80 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -139,18 +139,18 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type retur
// Calculate memory accessing operand for save/restore live registers.
static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
- RegisterSet* register_set,
+ LocationSummary* locations,
int64_t spill_offset,
bool is_save) {
- DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(),
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills,
codegen->GetNumberOfCoreRegisters(),
- register_set->GetFloatingPointRegisters(),
+ fp_spills,
codegen->GetNumberOfFloatingPointRegisters()));
- CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
- register_set->GetCoreRegisters() & (~callee_saved_core_registers.GetList()));
- CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
- register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.GetList()));
+ CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize, core_spills);
+ CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize, fp_spills);
MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
UseScratchRegisterScope temps(masm);
@@ -184,38 +184,35 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
}
void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
- RegisterSet* register_set = locations->GetLiveRegisters();
size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
- for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
- if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) {
- // If the register holds an object, update the stack mask.
- if (locations->RegisterContainsObject(i)) {
- locations->SetStackBit(stack_offset / kVRegSize);
- }
- DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
- DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
- saved_core_stack_offsets_[i] = stack_offset;
- stack_offset += kXRegSizeInBytes;
+ const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);
+ for (uint32_t i : LowToHighBits(core_spills)) {
+ // If the register holds an object, update the stack mask.
+ if (locations->RegisterContainsObject(i)) {
+ locations->SetStackBit(stack_offset / kVRegSize);
}
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_core_stack_offsets_[i] = stack_offset;
+ stack_offset += kXRegSizeInBytes;
}
- for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
- if (!codegen->IsFloatingPointCalleeSaveRegister(i) &&
- register_set->ContainsFloatingPointRegister(i)) {
- DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
- DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
- saved_fpu_stack_offsets_[i] = stack_offset;
- stack_offset += kDRegSizeInBytes;
- }
+ const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false);
+ for (uint32_t i : LowToHighBits(fp_spills)) {
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_fpu_stack_offsets_[i] = stack_offset;
+ stack_offset += kDRegSizeInBytes;
}
- SaveRestoreLiveRegistersHelper(codegen, register_set,
+ SaveRestoreLiveRegistersHelper(codegen,
+ locations,
codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
}
void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
- RegisterSet* register_set = locations->GetLiveRegisters();
- SaveRestoreLiveRegistersHelper(codegen, register_set,
+ SaveRestoreLiveRegistersHelper(codegen,
+ locations,
codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
}
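
The rewritten save/restore code walks a 32-bit spill mask with LowToHighBits rather than scanning every register index and testing set membership. ART's LowToHighBits is a range-style iterator; a self-contained sketch of the same iteration pattern, using a callback for brevity:

    #include <cstdint>

    template <typename Fn>
    void ForEachBitLowToHigh(uint32_t bits, Fn&& fn) {
      while (bits != 0u) {
        fn(__builtin_ctz(bits));  // Visit the index of the lowest set bit.
        bits &= bits - 1u;        // Clear that bit and continue.
      }
    }
    // ForEachBitLowToHigh(0b1010u, f) calls f(1), then f(3).
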
@@ -261,10 +258,6 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
arm64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -448,7 +441,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
}
const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
- bool IsFatal() const { return is_fatal_; }
+ bool IsFatal() const OVERRIDE { return is_fatal_; }
private:
const bool is_fatal_;
@@ -1452,7 +1445,7 @@ void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value());
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
@@ -1608,7 +1601,7 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -2036,7 +2029,7 @@ void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -2306,15 +2299,13 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
}
void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1).GetCode()));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -2685,14 +2676,8 @@ void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
}
void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -2924,7 +2909,7 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3077,7 +3062,7 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
@@ -3944,7 +3929,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
@@ -4384,14 +4369,8 @@ void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
}
void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -4677,7 +4656,7 @@ void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f0d79106dc..f1dc7eecb5 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -644,10 +644,10 @@ class CodeGeneratorARM64 : public CodeGenerator {
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
- void GenerateNop();
+ void GenerateNop() OVERRIDE;
- void GenerateImplicitNullCheck(HNullCheck* instruction);
- void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
private:
using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, vixl::aarch64::Literal<uint64_t>*>;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index f07f8a0d91..2211ea3846 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -194,10 +194,6 @@ class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -905,7 +901,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
} else {
DCHECK(destination.IsStackSlot())
<< "Cannot move " << c->DebugName() << " to " << destination;
- __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
}
} else if (c->IsLongConstant()) {
// Move 64 bit constant.
@@ -917,7 +913,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
} else {
DCHECK(destination.IsDoubleStackSlot())
<< "Cannot move " << c->DebugName() << " to " << destination;
- __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
}
} else if (c->IsFloatConstant()) {
// Move 32 bit float constant.
@@ -927,7 +923,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
} else {
DCHECK(destination.IsStackSlot())
<< "Cannot move " << c->DebugName() << " to " << destination;
- __ StoreConst32ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
}
} else {
// Move 64 bit double constant.
@@ -939,7 +935,7 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
} else {
DCHECK(destination.IsDoubleStackSlot())
<< "Cannot move " << c->DebugName() << " to " << destination;
- __ StoreConst64ToOffset(value, SP, destination.GetStackIndex(), TMP);
+ __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
}
}
}
@@ -1224,7 +1220,7 @@ void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
bool reordering = __ SetReorder(false);
__ LoadFromOffset(kLoadWord, T9, TR, GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value());
__ Jalr(T9);
@@ -1960,6 +1956,25 @@ void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
+Location LocationsBuilderMIPS::RegisterOrZeroConstant(HInstruction* instruction) {
+ return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern())
+ ? Location::ConstantLocation(instruction->AsConstant())
+ : Location::RequiresRegister();
+}
+
+Location LocationsBuilderMIPS::FpuRegisterOrConstantForStore(HInstruction* instruction) {
+ // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register.
+ // We can store a non-zero float or double constant without first loading it into the FPU,
+ // but we should only prefer this if the constant has a single use.
+ if (instruction->IsConstant() &&
+ (instruction->AsConstant()->IsZeroBitPattern() ||
+ instruction->GetUses().HasExactlyOneElement())) {
+    return Location::ConstantLocation(instruction->AsConstant());
+  }
+  // Otherwise require an FPU register for the constant.
+ return Location::RequiresFpuRegister();
+}
+
void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
bool needs_runtime_call = instruction->NeedsTypeCheck();
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
@@ -1974,9 +1989,9 @@ void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
- locations->SetInAt(2, Location::RequiresFpuRegister());
+ locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
} else {
- locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
}
}
}
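
The RegisterOrZeroConstant and FpuRegisterOrConstantForStore helpers above rest on the same observation: a constant whose bit pattern is all zeros can be stored directly from the hardwired MIPS ZERO register, so it never needs a register of its own. For floating point this is a bit-pattern test rather than a value test; a standalone sketch of the distinction (not ART's IsZeroBitPattern):

    #include <cstdint>
    #include <cstring>

    bool HasZeroBitPattern(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));
      return bits == 0u;  // True for +0.0; false for -0.0, whose sign bit is set.
    }
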
@@ -1985,24 +2000,29 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
Location index = locations->InAt(1);
+ Location value_location = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
bool needs_runtime_call = locations->WillCall();
bool needs_write_barrier =
CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
auto null_checker = GetImplicitNullChecker(instruction);
+ Register base_reg = index.IsConstant() ? obj : TMP;
switch (value_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register value = locations->InAt(2).AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
- __ StoreToOffset(kStoreByte, value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1;
} else {
- __ Addu(TMP, obj, index.AsRegister<Register>());
- __ StoreToOffset(kStoreByte, value, TMP, data_offset, null_checker);
+ __ Addu(base_reg, obj, index.AsRegister<Register>());
+ }
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ Register value = value_location.AsRegister<Register>();
+ __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker);
}
break;
}
@@ -2010,15 +2030,18 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register value = locations->InAt(2).AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
- __ StoreToOffset(kStoreHalfword, value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_2);
- __ Addu(TMP, obj, TMP);
- __ StoreToOffset(kStoreHalfword, value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_2);
+ __ Addu(base_reg, obj, base_reg);
+ }
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ Register value = value_location.AsRegister<Register>();
+ __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker);
}
break;
}
@@ -2027,20 +2050,23 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimNot: {
if (!needs_runtime_call) {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register value = locations->InAt(2).AsRegister<Register>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreToOffset(kStoreWord, value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- DCHECK(index.IsRegister()) << index;
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
- __ Addu(TMP, obj, TMP);
- __ StoreToOffset(kStoreWord, value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(base_reg, obj, base_reg);
}
- if (needs_write_barrier) {
- DCHECK_EQ(value_type, Primitive::kPrimNot);
- codegen_->MarkGCCard(obj, value);
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
+ DCHECK(!needs_write_barrier);
+ } else {
+ Register value = value_location.AsRegister<Register>();
+ __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
+ if (needs_write_barrier) {
+ DCHECK_EQ(value_type, Primitive::kPrimNot);
+ codegen_->MarkGCCard(obj, value);
+ }
}
} else {
DCHECK_EQ(value_type, Primitive::kPrimNot);
@@ -2052,47 +2078,54 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- Register value = locations->InAt(2).AsRegisterPairLow<Register>();
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreToOffset(kStoreDoubleword, value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
- __ Addu(TMP, obj, TMP);
- __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(base_reg, obj, base_reg);
+ }
+ if (value_location.IsConstant()) {
+ int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ Register value = value_location.AsRegisterPairLow<Register>();
+ __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
}
break;
}
case Primitive::kPrimFloat: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
- DCHECK(locations->InAt(2).IsFpuRegister());
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ StoreSToOffset(value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_4);
- __ Addu(TMP, obj, TMP);
- __ StoreSToOffset(value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_4);
+ __ Addu(base_reg, obj, base_reg);
+ }
+ if (value_location.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ FRegister value = value_location.AsFpuRegister<FRegister>();
+ __ StoreSToOffset(value, base_reg, data_offset, null_checker);
}
break;
}
case Primitive::kPrimDouble: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- FRegister value = locations->InAt(2).AsFpuRegister<FRegister>();
- DCHECK(locations->InAt(2).IsFpuRegister());
if (index.IsConstant()) {
- size_t offset =
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreDToOffset(value, obj, offset, null_checker);
+ data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
} else {
- __ Sll(TMP, index.AsRegister<Register>(), TIMES_8);
- __ Addu(TMP, obj, TMP);
- __ StoreDToOffset(value, TMP, data_offset, null_checker);
+ __ Sll(base_reg, index.AsRegister<Register>(), TIMES_8);
+ __ Addu(base_reg, obj, base_reg);
+ }
+ if (value_location.IsConstant()) {
+ int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
+ } else {
+ FRegister value = value_location.AsFpuRegister<FRegister>();
+ __ StoreDToOffset(value, base_reg, data_offset, null_checker);
}
break;
}
@@ -2104,15 +2137,13 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
}
void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -2627,14 +2658,8 @@ void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
}
void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -3688,7 +3713,7 @@ void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3888,9 +3913,9 @@ void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const Field
}
} else {
if (Primitive::IsFloatingPointType(field_type)) {
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
} else {
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1)));
}
}
}
@@ -3901,6 +3926,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
Primitive::Type type = field_info.GetFieldType();
LocationSummary* locations = instruction->GetLocations();
Register obj = locations->InAt(0).AsRegister<Register>();
+ Location value_location = locations->InAt(1);
StoreOperandType store_type = kStoreByte;
bool is_volatile = field_info.IsVolatile();
uint32_t offset = field_info.GetFieldOffset().Uint32Value();
@@ -3941,24 +3967,24 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
if (type == Primitive::kPrimDouble) {
// Pass FP parameters in core registers.
- Location in = locations->InAt(1);
- if (in.IsFpuRegister()) {
- __ Mfc1(locations->GetTemp(1).AsRegister<Register>(), in.AsFpuRegister<FRegister>());
+ if (value_location.IsFpuRegister()) {
+ __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
+ value_location.AsFpuRegister<FRegister>());
__ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
- in.AsFpuRegister<FRegister>());
- } else if (in.IsDoubleStackSlot()) {
+ value_location.AsFpuRegister<FRegister>());
+ } else if (value_location.IsDoubleStackSlot()) {
__ LoadFromOffset(kLoadWord,
locations->GetTemp(1).AsRegister<Register>(),
SP,
- in.GetStackIndex());
+ value_location.GetStackIndex());
__ LoadFromOffset(kLoadWord,
locations->GetTemp(2).AsRegister<Register>(),
SP,
- in.GetStackIndex() + 4);
+ value_location.GetStackIndex() + 4);
} else {
- DCHECK(in.IsConstant());
- DCHECK(in.GetConstant()->IsDoubleConstant());
- int64_t value = bit_cast<int64_t, double>(in.GetConstant()->AsDoubleConstant()->GetValue());
+ DCHECK(value_location.IsConstant());
+ DCHECK(value_location.GetConstant()->IsDoubleConstant());
+ int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
__ LoadConst64(locations->GetTemp(2).AsRegister<Register>(),
locations->GetTemp(1).AsRegister<Register>(),
value);
@@ -3967,19 +3993,19 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
codegen_->InvokeRuntime(kQuickA64Store, instruction, dex_pc);
CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>();
} else {
- if (!Primitive::IsFloatingPointType(type)) {
+ if (value_location.IsConstant()) {
+ int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
+ __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker);
+ } else if (!Primitive::IsFloatingPointType(type)) {
Register src;
if (type == Primitive::kPrimLong) {
- DCHECK(locations->InAt(1).IsRegisterPair());
- src = locations->InAt(1).AsRegisterPairLow<Register>();
+ src = value_location.AsRegisterPairLow<Register>();
} else {
- DCHECK(locations->InAt(1).IsRegister());
- src = locations->InAt(1).AsRegister<Register>();
+ src = value_location.AsRegister<Register>();
}
__ StoreToOffset(store_type, src, obj, offset, null_checker);
} else {
- DCHECK(locations->InAt(1).IsFpuRegister());
- FRegister src = locations->InAt(1).AsFpuRegister<FRegister>();
+ FRegister src = value_location.AsFpuRegister<FRegister>();
if (type == Primitive::kPrimFloat) {
__ StoreSToOffset(src, obj, offset, null_checker);
} else {
@@ -3990,8 +4016,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
// TODO: memory barriers?
if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
- DCHECK(locations->InAt(1).IsRegister());
- Register src = locations->InAt(1).AsRegister<Register>();
+ Register src = value_location.AsRegister<Register>();
codegen_->MarkGCCard(obj, src);
}
@@ -5075,14 +5100,8 @@ void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
}
void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -5376,7 +5395,7 @@ void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5824,13 +5843,11 @@ void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
locations->SetInAt(0, Location::RequiresRegister());
}
-void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
- int32_t lower_bound = switch_instr->GetStartValue();
- int32_t num_entries = switch_instr->GetNumEntries();
- LocationSummary* locations = switch_instr->GetLocations();
- Register value_reg = locations->InAt(0).AsRegister<Register>();
- HBasicBlock* default_block = switch_instr->GetDefaultBlock();
-
+void InstructionCodeGeneratorMIPS::GenPackedSwitchWithCompares(Register value_reg,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block) {
// Create a set of compare/jumps.
Register temp_reg = TMP;
__ Addiu32(temp_reg, value_reg, -lower_bound);
@@ -5839,7 +5856,7 @@ void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr
// this case, index >= num_entries must be true, so we can save one branch instruction.
__ Bltz(temp_reg, codegen_->GetLabelOf(default_block));
- const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
+ const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
// Jump to successors[0] if value == lower_bound.
__ Beqz(temp_reg, codegen_->GetLabelOf(successors[0]));
int32_t last_index = 0;
@@ -5857,11 +5874,107 @@ void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr
}
// And the default for any other value.
- if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
+ if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
__ B(codegen_->GetLabelOf(default_block));
}
}
+void InstructionCodeGeneratorMIPS::GenTableBasedPackedSwitch(Register value_reg,
+ Register constant_area,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block) {
+ // Create a jump table.
+ std::vector<MipsLabel*> labels(num_entries);
+ const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
+ for (uint32_t i = 0; i < num_entries; i++) {
+ labels[i] = codegen_->GetLabelOf(successors[i]);
+ }
+ JumpTable* table = __ CreateJumpTable(std::move(labels));
+
+ // Is the value in range?
+ __ Addiu32(TMP, value_reg, -lower_bound);
+ if (IsInt<16>(static_cast<int32_t>(num_entries))) {
+ __ Sltiu(AT, TMP, num_entries);
+ __ Beqz(AT, codegen_->GetLabelOf(default_block));
+ } else {
+ __ LoadConst32(AT, num_entries);
+ __ Bgeu(TMP, AT, codegen_->GetLabelOf(default_block));
+ }
+
+ // We are in the range of the table.
+ // Load the target address from the jump table, indexing by the value.
+ __ LoadLabelAddress(AT, constant_area, table->GetLabel());
+ __ Sll(TMP, TMP, 2);
+ __ Addu(TMP, TMP, AT);
+ __ Lw(TMP, TMP, 0);
+ // Compute the absolute target address by adding the table start address
+ // (the table contains offsets to targets relative to its start).
+ __ Addu(TMP, TMP, AT);
+ // And jump.
+ __ Jr(TMP);
+ __ NopIfNoReordering();
+}
+
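
The sequence above re-adds the table address (AT) after loading an entry because each table slot holds an offset relative to the table start, not an absolute address. The address computation, restated as standalone C++:

    #include <cstdint>

    const void* SwitchTarget(const int32_t* table, uint32_t index) {
      const uint8_t* base = reinterpret_cast<const uint8_t*>(table);
      return base + table[index];  // Lw TMP, TMP, 0; Addu TMP, TMP, AT; Jr TMP.
    }
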
+void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ uint32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ HBasicBlock* switch_block = switch_instr->GetBlock();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ if (codegen_->GetInstructionSetFeatures().IsR6() &&
+ num_entries > kPackedSwitchJumpTableThreshold) {
+ // R6 uses PC-relative addressing to access the jump table.
+  // R2, on the other hand, requires an HMipsComputeBaseMethodAddress input to access
+ // the jump table and it is implemented by changing HPackedSwitch to
+ // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress.
+ // See VisitMipsPackedSwitch() for the table-based implementation on R2.
+ GenTableBasedPackedSwitch(value_reg,
+ ZERO,
+ lower_bound,
+ num_entries,
+ switch_block,
+ default_block);
+ } else {
+ GenPackedSwitchWithCompares(value_reg,
+ lower_bound,
+ num_entries,
+ switch_block,
+ default_block);
+ }
+}
+
+void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+ // Constant area pointer (HMipsComputeBaseMethodAddress).
+ locations->SetInAt(1, Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ uint32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+ HBasicBlock* switch_block = switch_instr->GetBlock();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ // This is an R2-only path. HPackedSwitch has been changed to
+ // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress
+ // required to address the jump table relative to PC.
+ GenTableBasedPackedSwitch(value_reg,
+ constant_area,
+ lower_bound,
+ num_entries,
+ switch_block,
+ default_block);
+}
+
void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress(
HMipsComputeBaseMethodAddress* insn) {
LocationSummary* locations =
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 003998129e..553a7e6674 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -191,6 +191,8 @@ class LocationsBuilderMIPS : public HGraphVisitor {
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ Location RegisterOrZeroConstant(HInstruction* instruction);
+ Location FpuRegisterOrConstantForStore(HInstruction* instruction);
InvokeDexCallingConventionVisitorMIPS parameter_visitor_;
@@ -218,6 +220,14 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
MipsAssembler* GetAssembler() const { return assembler_; }
+ // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit
+ // instructions for N cases.
+ // Table-based packed switch generates approx. 11 32-bit instructions
+ // and N 32-bit data words for N cases.
+ // At N = 6 they come out as 18 and 17 32-bit words respectively.
+ // We switch to the table-based method starting with 7 cases.
+ static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
+
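
A standalone check of the cost model in the comment above (not ART code): the two estimates cross where 3 + 2.5*N = 11 + N, i.e. N = 16/3, roughly 5.3, which matches the quoted 18-versus-17 words at N = 6; with the threshold set to 6, the table path is taken only for more than 6 cases (and, per VisitPackedSwitch above, only on R6 or through HMipsPackedSwitch on R2).

    constexpr double CompareChainWords(unsigned n) { return 3 + 2.5 * n; }
    constexpr unsigned TableWords(unsigned n) { return 11 + n; }
    static_assert(CompareChainWords(6) == 18.0, "comment's N = 6 figure");
    static_assert(TableWords(6) == 17u, "comment's N = 6 figure");
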
private:
void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg);
void GenerateMemoryBarrier(MemBarrierKind kind);
@@ -262,6 +272,17 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
void GenerateDivRemIntegral(HBinaryOperation* instruction);
void HandleGoto(HInstruction* got, HBasicBlock* successor);
auto GetImplicitNullChecker(HInstruction* instruction);
+ void GenPackedSwitchWithCompares(Register value_reg,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block);
+ void GenTableBasedPackedSwitch(Register value_reg,
+ Register constant_area,
+ int32_t lower_bound,
+ uint32_t num_entries,
+ HBasicBlock* switch_block,
+ HBasicBlock* default_block);
MipsAssembler* const assembler_;
CodeGeneratorMIPS* const codegen_;
@@ -310,10 +331,10 @@ class CodeGeneratorMIPS : public CodeGenerator {
void SetupBlockedRegisters() const OVERRIDE;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
void ClobberRA() {
clobbered_ra_ = true;
}
@@ -344,7 +365,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
- void MoveConstant(Location destination, int32_t value);
+ void MoveConstant(Location destination, int32_t value) OVERRIDE;
void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
@@ -356,7 +377,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
- bool NeedsTwoRegisters(Primitive::Type type) const {
+ bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
return type == Primitive::kPrimLong;
}
@@ -384,9 +405,9 @@ class CodeGeneratorMIPS : public CodeGenerator {
UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
}
- void GenerateNop();
- void GenerateImplicitNullCheck(HNullCheck* instruction);
- void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateNop() OVERRIDE;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
// The PcRelativePatchInfo is used for PC-relative addressing of dex cache arrays
// and boot image strings. The only difference is the interpretation of the offset_or_index.
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 664d498b18..5039fad708 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -150,10 +150,6 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -946,7 +942,7 @@ void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
// TODO: anything related to T9/GP/GOT/PIC/.so's?
__ LoadFromOffset(kLoadDoubleword,
T9,
@@ -1558,15 +1554,13 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
}
void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -2110,14 +2104,8 @@ void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
}
void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -2630,7 +2618,7 @@ void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
@@ -3461,14 +3449,8 @@ void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
}
void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -3748,7 +3730,7 @@ void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 3910530eb5..2dd409a224 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -285,10 +285,10 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void SetupBlockedRegisters() const OVERRIDE;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
@@ -327,7 +327,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
- bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const { return false; }
+ bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE { return false; }
// Check if the desired_string_load_kind is supported. If it is, return it,
// otherwise return a fall-back kind that should be used instead.
@@ -353,9 +353,9 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
}
- void GenerateNop();
- void GenerateImplicitNullCheck(HNullCheck* instruction);
- void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateNop() OVERRIDE;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
private:
// Labels for each block that will be compiled.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index e18b366411..cc9fe832f1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -84,10 +84,6 @@ class DivZeroCheckSlowPathX86 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
x86_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -754,7 +750,7 @@ void CodeGeneratorX86::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(entrypoint).Int32Value());
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
@@ -1069,15 +1065,11 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
__ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
} else if (source.IsConstant()) {
HConstant* constant = source.GetConstant();
- int64_t value;
- if (constant->IsLongConstant()) {
- value = constant->AsLongConstant()->GetValue();
- } else {
- DCHECK(constant->IsDoubleConstant());
- value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
- }
+ DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant());
+ int64_t value = GetInt64ValueOf(constant);
__ movl(Address(ESP, destination.GetStackIndex()), Immediate(Low32Bits(value)));
- __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+ __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
+ Immediate(High32Bits(value)));
} else {
DCHECK(source.IsDoubleStackSlot()) << source;
EmitParallelMoves(
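The Move64 cleanup above leans on GetInt64ValueOf, which folds away the long/double branches the diff deletes. A hedged standalone model of the reinterpretation step (a free function here; the real helper is a CodeGenerator member operating on HConstant):

#include <cstdint>
#include <cstring>

// bit_cast<int64_t, double>: reinterpret the IEEE-754 bits, no value conversion.
inline int64_t DoubleBitsAsInt64(double v) {
  int64_t bits;
  static_assert(sizeof bits == sizeof v, "double must be 64-bit");
  std::memcpy(&bits, &v, sizeof bits);
  return bits;
}
// A long constant is returned unchanged; a double constant goes through the
// bit reinterpretation above, exactly as the deleted if/else did.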
@@ -1427,14 +1419,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
Location lhs = condition->GetLocations()->InAt(0);
Location rhs = condition->GetLocations()->InAt(1);
// LHS is guaranteed to be in a register (see LocationsBuilderX86::HandleCondition).
- if (rhs.IsRegister()) {
- __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
- } else if (rhs.IsConstant()) {
- int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- codegen_->Compare32BitValue(lhs.AsRegister<Register>(), constant);
- } else {
- __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
- }
+ codegen_->GenerateIntCompare(lhs, rhs);
if (true_target == nullptr) {
__ j(X86Condition(condition->GetOppositeCondition()), false_target);
} else {
@@ -1469,7 +1454,7 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) {
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -1528,18 +1513,6 @@ void LocationsBuilderX86::VisitSelect(HSelect* select) {
locations->SetOut(Location::SameAsFirstInput());
}
-void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) {
- Register lhs_reg = lhs.AsRegister<Register>();
- if (rhs.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- Compare32BitValue(lhs_reg, value);
- } else if (rhs.IsStackSlot()) {
- assembler_.cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex()));
- } else {
- assembler_.cmpl(lhs_reg, rhs.AsRegister<Register>());
- }
-}
-
void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) {
LocationSummary* locations = select->GetLocations();
DCHECK(locations->InAt(0).Equals(locations->Out()));
@@ -3571,10 +3544,7 @@ void InstructionCodeGeneratorX86::VisitRem(HRem* rem) {
}
void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte:
@@ -3594,9 +3564,6 @@ void LocationsBuilderX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
default:
LOG(FATAL) << "Unexpected type for HDivZeroCheck " << instruction->GetType();
}
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction) {
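CreateThrowingSlowPathLocations centralizes the boilerplate this hunk deletes. Its body lives in code_generator.cc and is not shown in this diff; inferred from the deleted call sites, its shape is roughly the following (a hedged sketch, not the verbatim implementation):

LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(
    HInstruction* instruction,
    RegisterSet caller_saves /* = RegisterSet::Empty() */) {
  // A throwing check never defines a value, so no output location is set;
  // the old per-backend `if (instruction->HasUses())` branches disappear.
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  if (call_kind == LocationSummary::kCallOnSlowPath) {
    locations->SetCustomSlowPathCallerSaves(caller_saves);
  }
  return locations;
}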
@@ -3621,7 +3588,7 @@ void InstructionCodeGeneratorX86::VisitDivZeroCheck(HDivZeroCheck* instruction)
} else {
DCHECK(value.IsConstant()) << value;
if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ __ jmp(slow_path->GetEntryLabel());
}
}
break;
@@ -4540,7 +4507,7 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
@@ -4950,17 +4917,11 @@ void InstructionCodeGeneratorX86::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderX86::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- Location loc = codegen_->IsImplicitNullCheckAllowed(instruction)
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
+ Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
? Location::RequiresRegister()
: Location::Any();
locations->SetInAt(0, loc);
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void CodeGeneratorX86::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -5007,7 +4968,7 @@ void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -5039,56 +5000,31 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
switch (type) {
case Primitive::kPrimBoolean: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movzxb(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
- } else {
- __ movzxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
- }
+ __ movzxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
}
case Primitive::kPrimByte: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movsxb(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
- } else {
- __ movsxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
- }
+ __ movsxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
}
case Primitive::kPrimShort: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movsxw(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
- } else {
- __ movsxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
- }
+ __ movsxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
break;
}
case Primitive::kPrimChar: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movzxw(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
- } else {
- __ movzxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
- }
+ __ movzxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
break;
}
case Primitive::kPrimInt: {
Register out = out_loc.AsRegister<Register>();
- if (index.IsConstant()) {
- __ movl(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
- } else {
- __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
- }
+ __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
break;
}
@@ -5105,21 +5041,16 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
} else {
Register out = out_loc.AsRegister<Register>();
+ __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
if (index.IsConstant()) {
uint32_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ movl(out, Address(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- // If read barriers are enabled, emit read barriers other than
- // Baker's using a slow path (and also unpoison the loaded
- // reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
} else {
- __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- // If read barriers are enabled, emit read barriers other than
- // Baker's using a slow path (and also unpoison the loaded
- // reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(
instruction, out_loc, out_loc, obj_loc, data_offset, index);
}
@@ -5129,40 +5060,23 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimLong: {
DCHECK_NE(obj, out_loc.AsRegisterPairLow<Register>());
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ movl(out_loc.AsRegisterPairLow<Register>(), Address(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(out_loc.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
- } else {
- __ movl(out_loc.AsRegisterPairLow<Register>(),
- Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(out_loc.AsRegisterPairHigh<Register>(),
- Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize));
- }
+ __ movl(out_loc.AsRegisterPairLow<Register>(),
+ CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ movl(out_loc.AsRegisterPairHigh<Register>(),
+ CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset + kX86WordSize));
break;
}
case Primitive::kPrimFloat: {
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
- if (index.IsConstant()) {
- __ movss(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
- } else {
- __ movss(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
- }
+ __ movss(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
break;
}
case Primitive::kPrimDouble: {
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
- if (index.IsConstant()) {
- __ movsd(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
- } else {
- __ movsd(out, Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
- }
+ __ movsd(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset));
break;
}
@@ -5233,9 +5147,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_1, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_1, offset);
if (value.IsRegister()) {
__ movb(address, value.AsRegister<ByteRegister>());
} else {
@@ -5248,9 +5160,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_2, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_2, offset);
if (value.IsRegister()) {
__ movw(address, value.AsRegister<Register>());
} else {
@@ -5262,9 +5172,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimNot: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset);
if (!value.IsRegister()) {
// Just setting null.
@@ -5360,9 +5268,7 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimInt: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset);
if (value.IsRegister()) {
__ movl(address, value.AsRegister<Register>());
} else {
@@ -5376,44 +5282,27 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- if (index.IsConstant()) {
- size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- if (value.IsRegisterPair()) {
- __ movl(Address(array, offset), value.AsRegisterPairLow<Register>());
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(array, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
- } else {
- DCHECK(value.IsConstant());
- int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
- __ movl(Address(array, offset), Immediate(Low32Bits(val)));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(array, offset + kX86WordSize), Immediate(High32Bits(val)));
- }
+ if (value.IsRegisterPair()) {
+ __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset),
+ value.AsRegisterPairLow<Register>());
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset + kX86WordSize),
+ value.AsRegisterPairHigh<Register>());
} else {
- if (value.IsRegisterPair()) {
- __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
- value.AsRegisterPairLow<Register>());
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
- value.AsRegisterPairHigh<Register>());
- } else {
- DCHECK(value.IsConstant());
- int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
- __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
- Immediate(Low32Bits(val)));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
- Immediate(High32Bits(val)));
- }
+ DCHECK(value.IsConstant());
+ int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset),
+ Immediate(Low32Bits(val)));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset + kX86WordSize),
+ Immediate(High32Bits(val)));
}
break;
}
case Primitive::kPrimFloat: {
uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset);
if (value.IsFpuRegister()) {
__ movss(address, value.AsFpuRegister<XmmRegister>());
} else {
@@ -5427,17 +5316,13 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimDouble: {
uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
- : Address(array, index.AsRegister<Register>(), TIMES_8, offset);
+ Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, offset);
if (value.IsFpuRegister()) {
__ movsd(address, value.AsFpuRegister<XmmRegister>());
} else {
DCHECK(value.IsConstant());
- Address address_hi = index.IsConstant() ?
- Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) +
- offset + kX86WordSize) :
- Address(array, index.AsRegister<Register>(), TIMES_8, offset + kX86WordSize);
+ Address address_hi =
+ CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, offset + kX86WordSize);
int64_t v = bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue());
__ movl(address, Immediate(Low32Bits(v)));
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -5474,18 +5359,16 @@ void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
}
void LocationsBuilderX86::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
HInstruction* length = instruction->InputAt(1);
if (!length->IsEmittedAtUseSite()) {
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
}
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
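// Hedged sketch of the contract behind the caller_saves set above: the
// bounds-check slow path moves index and length into the first two argument
// registers of the runtime calling convention and then calls a no-return
// entrypoint, asserted elsewhere in these code generators as
//   CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
// Only those two registers are clobbered before control leaves compiled
// code, so nothing else needs to be treated as caller-save.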
@@ -5531,13 +5414,7 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
}
codegen_->MaybeRecordImplicitNullCheck(array_length);
} else {
- Register length = length_loc.AsRegister<Register>();
- if (index_loc.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
- __ cmpl(length, Immediate(value));
- } else {
- __ cmpl(length, index_loc.AsRegister<Register>());
- }
+ codegen_->GenerateIntCompare(length_loc, index_loc);
}
codegen_->AddSlowPath(slow_path);
__ j(kBelowEqual, slow_path->GetEntryLabel());
@@ -5555,7 +5432,7 @@ void InstructionCodeGeneratorX86::VisitParallelMove(HParallelMove* instruction)
void LocationsBuilderX86::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorX86::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5913,7 +5790,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
@@ -6210,7 +6087,7 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
@@ -6915,9 +6792,7 @@ void CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instr
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ ref =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- Address src = index.IsConstant() ?
- Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) :
- Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset);
+ Address src = CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset);
GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, needs_null_check);
}
@@ -7398,6 +7273,27 @@ void CodeGeneratorX86::Compare32BitValue(Register dest, int32_t value) {
}
}
+void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) {
+ Register lhs_reg = lhs.AsRegister<Register>();
+ if (rhs.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
+ Compare32BitValue(lhs_reg, value);
+ } else if (rhs.IsStackSlot()) {
+ __ cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex()));
+ } else {
+ __ cmpl(lhs_reg, rhs.AsRegister<Register>());
+ }
+}
+
+Address CodeGeneratorX86::ArrayAddress(Register obj,
+ Location index,
+ ScaleFactor scale,
+ uint32_t data_offset) {
+ return index.IsConstant() ?
+ Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) :
+ Address(obj, index.AsRegister<Register>(), scale, data_offset);
+}
+
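ArrayAddress collapses the constant-index and register-index patterns repeated throughout VisitArrayGet/VisitArraySet. The TIMES_1/2/4/8 scale factors are shift amounts 0 through 3, so the constant-index case is plain displacement arithmetic; a standalone model of that case (hypothetical names, for illustration only):

#include <cstdint>

// Displacement for Address(obj, (index << scale) + data_offset).
inline int32_t ConstantIndexDisplacement(int32_t index,
                                         int scale_shift,
                                         uint32_t data_offset) {
  return (index << scale_shift) + static_cast<int32_t>(data_offset);
}
// Example: index 3 with TIMES_4 (shift 2) and data_offset 12 -> 3*4 + 12 = 24,
// i.e. the [obj + 24] operand; a register index instead yields
// [obj + index_reg*4 + 12] via the scaled-index addressing mode.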
Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
Register reg,
Register value) {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index e2250981bb..5866e65d88 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -427,8 +427,6 @@ class CodeGeneratorX86 : public CodeGenerator {
Register value,
bool value_can_be_null);
- void GenerateIntCompare(Location lhs, Location rhs);
-
void GenerateMemoryBarrier(MemBarrierKind kind);
Label* GetLabelOf(HBasicBlock* block) const {
@@ -474,6 +472,15 @@ class CodeGeneratorX86 : public CodeGenerator {
// Compare a register with a 32-bit value in the most efficient manner.
void Compare32BitValue(Register dest, int32_t value);
+ // Compare int values. Supports only register locations for `lhs`.
+ void GenerateIntCompare(Location lhs, Location rhs);
+
+ // Construct address for array access.
+ static Address ArrayAddress(Register obj,
+ Location index,
+ ScaleFactor scale,
+ uint32_t data_offset);
+
Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
void Finalize(CodeAllocator* allocator) OVERRIDE;
@@ -561,9 +568,9 @@ class CodeGeneratorX86 : public CodeGenerator {
}
}
- void GenerateNop();
- void GenerateImplicitNullCheck(HNullCheck* instruction);
- void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateNop() OVERRIDE;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
// When we don't know the proper offset for the value, we use kDummy32BitOffset.
// The correct value will be inserted when processing Assembler fixups.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 15307fe50c..1d87bf6198 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -88,10 +88,6 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCode {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- if (instruction_->CanThrowIntoCatchBlock()) {
- // Live registers will be restored in the catch block if caught.
- SaveLiveRegisters(codegen, instruction_->GetLocations());
- }
x86_64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
}
@@ -981,7 +977,7 @@ void CodeGeneratorX86_64::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
SlowPathCode* slow_path) {
- ValidateInvokeRuntime(instruction, slow_path);
+ ValidateInvokeRuntime(entrypoint, instruction, slow_path);
GenerateInvokeRuntime(GetThreadOffset<kX86_64PointerSize>(entrypoint).Int32Value());
if (EntrypointRequiresStackMap(entrypoint)) {
RecordPcInfo(instruction, dex_pc, slow_path);
@@ -1204,13 +1200,8 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) {
source.AsFpuRegister<XmmRegister>());
} else if (source.IsConstant()) {
HConstant* constant = source.GetConstant();
- int64_t value;
- if (constant->IsDoubleConstant()) {
- value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
- } else {
- DCHECK(constant->IsLongConstant());
- value = constant->AsLongConstant()->GetValue();
- }
+ DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant());
+ int64_t value = GetInt64ValueOf(constant);
Store64BitValueToStack(destination, value);
} else {
DCHECK(source.IsDoubleStackSlot());
@@ -1309,31 +1300,11 @@ void InstructionCodeGeneratorX86_64::GenerateCompareTest(HCondition* condition)
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- CpuRegister left_reg = left.AsRegister<CpuRegister>();
- if (right.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(right.GetConstant());
- if (value == 0) {
- __ testl(left_reg, left_reg);
- } else {
- __ cmpl(left_reg, Immediate(value));
- }
- } else if (right.IsStackSlot()) {
- __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
- } else {
- __ cmpl(left_reg, right.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateIntCompare(left, right);
break;
}
case Primitive::kPrimLong: {
- CpuRegister left_reg = left.AsRegister<CpuRegister>();
- if (right.IsConstant()) {
- int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
- codegen_->Compare64BitValue(left_reg, value);
- } else if (right.IsDoubleStackSlot()) {
- __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
- } else {
- __ cmpq(left_reg, right.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateLongCompare(left, right);
break;
}
case Primitive::kPrimFloat: {
@@ -1488,15 +1459,7 @@ void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruc
Location lhs = condition->GetLocations()->InAt(0);
Location rhs = condition->GetLocations()->InAt(1);
- if (rhs.IsRegister()) {
- __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
- } else if (rhs.IsConstant()) {
- int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant);
- } else {
- __ cmpl(lhs.AsRegister<CpuRegister>(),
- Address(CpuRegister(RSP), rhs.GetStackIndex()));
- }
+ codegen_->GenerateIntCompare(lhs, rhs);
if (true_target == nullptr) {
__ j(X86_64IntegerCondition(condition->GetOppositeCondition()), false_target);
} else {
@@ -1531,7 +1494,7 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) {
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
@@ -1696,28 +1659,14 @@ void InstructionCodeGeneratorX86_64::HandleCondition(HCondition* cond) {
// Clear output register: setcc only sets the low byte.
__ xorl(reg, reg);
- if (rhs.IsRegister()) {
- __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
- } else if (rhs.IsConstant()) {
- int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
- codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant);
- } else {
- __ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
- }
+ codegen_->GenerateIntCompare(lhs, rhs);
__ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
return;
case Primitive::kPrimLong:
// Clear output register: setcc only sets the low byte.
__ xorl(reg, reg);
- if (rhs.IsRegister()) {
- __ cmpq(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
- } else if (rhs.IsConstant()) {
- int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue();
- codegen_->Compare64BitValue(lhs.AsRegister<CpuRegister>(), value);
- } else {
- __ cmpq(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
- }
+ codegen_->GenerateLongCompare(lhs, rhs);
__ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
return;
case Primitive::kPrimFloat: {
@@ -1885,27 +1834,11 @@ void InstructionCodeGeneratorX86_64::VisitCompare(HCompare* compare) {
case Primitive::kPrimShort:
case Primitive::kPrimChar:
case Primitive::kPrimInt: {
- CpuRegister left_reg = left.AsRegister<CpuRegister>();
- if (right.IsConstant()) {
- int32_t value = right.GetConstant()->AsIntConstant()->GetValue();
- codegen_->Compare32BitValue(left_reg, value);
- } else if (right.IsStackSlot()) {
- __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
- } else {
- __ cmpl(left_reg, right.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateIntCompare(left, right);
break;
}
case Primitive::kPrimLong: {
- CpuRegister left_reg = left.AsRegister<CpuRegister>();
- if (right.IsConstant()) {
- int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
- codegen_->Compare64BitValue(left_reg, value);
- } else if (right.IsDoubleStackSlot()) {
- __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
- } else {
- __ cmpq(left_reg, right.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateLongCompare(left, right);
break;
}
case Primitive::kPrimFloat: {
@@ -3681,14 +3614,8 @@ void InstructionCodeGeneratorX86_64::VisitRem(HRem* rem) {
}
void LocationsBuilderX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
locations->SetInAt(0, Location::Any());
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
@@ -3714,7 +3641,7 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
} else {
DCHECK(value.IsConstant()) << value;
if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ __ jmp(slow_path->GetEntryLabel());
}
}
break;
@@ -3729,7 +3656,7 @@ void InstructionCodeGeneratorX86_64::VisitDivZeroCheck(HDivZeroCheck* instructio
} else {
DCHECK(value.IsConstant()) << value;
if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
- __ jmp(slow_path->GetEntryLabel());
+ __ jmp(slow_path->GetEntryLabel());
}
}
break;
@@ -4084,7 +4011,7 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
if (Primitive::IsFloatingPointType(instruction->GetType())) {
@@ -4459,17 +4386,11 @@ void InstructionCodeGeneratorX86_64::VisitUnresolvedStaticFieldSet(
}
void LocationsBuilderX86_64::VisitNullCheck(HNullCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
- Location loc = codegen_->IsImplicitNullCheckAllowed(instruction)
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
+ Location loc = codegen_->GetCompilerOptions().GetImplicitNullChecks()
? Location::RequiresRegister()
: Location::Any();
locations->SetInAt(0, loc);
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void CodeGeneratorX86_64::GenerateImplicitNullCheck(HNullCheck* instruction) {
@@ -4516,7 +4437,7 @@ void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary::kCallOnSlowPath :
LocationSummary::kNoCall);
if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -4544,56 +4465,31 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
switch (type) {
case Primitive::kPrimBoolean: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movzxb(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
- } else {
- __ movzxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset));
- }
+ __ movzxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
}
case Primitive::kPrimByte: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movsxb(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
- } else {
- __ movsxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset));
- }
+ __ movsxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset));
break;
}
case Primitive::kPrimShort: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movsxw(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
- } else {
- __ movsxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset));
- }
+ __ movsxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
break;
}
case Primitive::kPrimChar: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movzxw(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
- } else {
- __ movzxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset));
- }
+ __ movzxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
break;
}
case Primitive::kPrimInt: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movl(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
- } else {
- __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
- }
+ __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
break;
}
@@ -4610,21 +4506,16 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
} else {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
+ __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
+ codegen_->MaybeRecordImplicitNullCheck(instruction);
+ // If read barriers are enabled, emit read barriers other than
+ // Baker's using a slow path (and also unpoison the loaded
+ // reference, if heap poisoning is enabled).
if (index.IsConstant()) {
uint32_t offset =
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
- __ movl(out, Address(obj, offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- // If read barriers are enabled, emit read barriers other than
- // Baker's using a slow path (and also unpoison the loaded
- // reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
} else {
- __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
- codegen_->MaybeRecordImplicitNullCheck(instruction);
- // If read barriers are enabled, emit read barriers other than
- // Baker's using a slow path (and also unpoison the loaded
- // reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(
instruction, out_loc, out_loc, obj_loc, data_offset, index);
}
@@ -4634,34 +4525,19 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
case Primitive::kPrimLong: {
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- if (index.IsConstant()) {
- __ movq(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
- } else {
- __ movq(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset));
- }
+ __ movq(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset));
break;
}
case Primitive::kPrimFloat: {
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
- if (index.IsConstant()) {
- __ movss(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
- } else {
- __ movss(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
- }
+ __ movss(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
break;
}
case Primitive::kPrimDouble: {
XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
- if (index.IsConstant()) {
- __ movsd(out, Address(obj,
- (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
- } else {
- __ movsd(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset));
- }
+ __ movsd(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset));
break;
}
@@ -4724,9 +4600,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_1, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_1, offset);
if (value.IsRegister()) {
__ movb(address, value.AsRegister<CpuRegister>());
} else {
@@ -4739,9 +4613,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_2, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_2, offset);
if (value.IsRegister()) {
__ movw(address, value.AsRegister<CpuRegister>());
} else {
@@ -4754,9 +4626,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimNot: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset);
if (!value.IsRegister()) {
// Just setting null.
@@ -4852,9 +4722,7 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimInt: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset);
if (value.IsRegister()) {
__ movl(address, value.AsRegister<CpuRegister>());
} else {
@@ -4868,18 +4736,14 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimLong: {
uint32_t offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset);
if (value.IsRegister()) {
__ movq(address, value.AsRegister<CpuRegister>());
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
- Address address_high = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) +
- offset + sizeof(int32_t))
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset + sizeof(int32_t));
+ Address address_high =
+ CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset + sizeof(int32_t));
codegen_->MoveInt64ToAddress(address, address_high, v, instruction);
}
break;
@@ -4887,15 +4751,12 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimFloat: {
uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset);
if (value.IsFpuRegister()) {
__ movss(address, value.AsFpuRegister<XmmRegister>());
} else {
DCHECK(value.IsConstant());
- int32_t v =
- bit_cast<int32_t, float>(value.GetConstant()->AsFloatConstant()->GetValue());
+ int32_t v = bit_cast<int32_t, float>(value.GetConstant()->AsFloatConstant()->GetValue());
__ movl(address, Immediate(v));
}
codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -4904,19 +4765,15 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
case Primitive::kPrimDouble: {
uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
- Address address = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+ Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset);
if (value.IsFpuRegister()) {
__ movsd(address, value.AsFpuRegister<XmmRegister>());
codegen_->MaybeRecordImplicitNullCheck(instruction);
} else {
int64_t v =
bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue());
- Address address_high = index.IsConstant()
- ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) +
- offset + sizeof(int32_t))
- : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset + sizeof(int32_t));
+ Address address_high =
+ CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset + sizeof(int32_t));
codegen_->MoveInt64ToAddress(address, address_high, v, instruction);
}
break;
@@ -4951,18 +4808,16 @@ void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction)
}
void LocationsBuilderX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
- LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ RegisterSet caller_saves = RegisterSet::Empty();
+ InvokeRuntimeCallingConvention calling_convention;
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
HInstruction* length = instruction->InputAt(1);
if (!length->IsEmittedAtUseSite()) {
locations->SetInAt(1, Location::RegisterOrConstant(length));
}
- if (instruction->HasUses()) {
- locations->SetOut(Location::SameAsFirstInput());
- }
}
void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction) {
@@ -5007,13 +4862,7 @@ void InstructionCodeGeneratorX86_64::VisitBoundsCheck(HBoundsCheck* instruction)
}
codegen_->MaybeRecordImplicitNullCheck(array_length);
} else {
- CpuRegister length = length_loc.AsRegister<CpuRegister>();
- if (index_loc.IsConstant()) {
- int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
- __ cmpl(length, Immediate(value));
- } else {
- __ cmpl(length, index_loc.AsRegister<CpuRegister>());
- }
+ codegen_->GenerateIntCompare(length_loc, index_loc);
}
codegen_->AddSlowPath(slow_path);
__ j(kBelowEqual, slow_path->GetEntryLabel());
@@ -5051,7 +4900,7 @@ void InstructionCodeGeneratorX86_64::VisitParallelMove(HParallelMove* instructio
void LocationsBuilderX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
void InstructionCodeGeneratorX86_64::VisitSuspendCheck(HSuspendCheck* instruction) {
@@ -5352,7 +5201,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
@@ -5627,7 +5476,7 @@ void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
if (baker_read_barrier_slow_path) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
@@ -6367,9 +6216,7 @@ void CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* in
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ ref =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- Address src = index.IsConstant() ?
- Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) :
- Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset);
+ Address src = CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset);
GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, needs_null_check);
}
@@ -6674,6 +6521,39 @@ void CodeGeneratorX86_64::Compare64BitValue(CpuRegister dest, int64_t value) {
}
}
+void CodeGeneratorX86_64::GenerateIntCompare(Location lhs, Location rhs) {
+ CpuRegister lhs_reg = lhs.AsRegister<CpuRegister>();
+ if (rhs.IsConstant()) {
+ int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
+ Compare32BitValue(lhs_reg, value);
+ } else if (rhs.IsStackSlot()) {
+ __ cmpl(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ } else {
+ __ cmpl(lhs_reg, rhs.AsRegister<CpuRegister>());
+ }
+}
+
+void CodeGeneratorX86_64::GenerateLongCompare(Location lhs, Location rhs) {
+ CpuRegister lhs_reg = lhs.AsRegister<CpuRegister>();
+ if (rhs.IsConstant()) {
+ int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue();
+ Compare64BitValue(lhs_reg, value);
+ } else if (rhs.IsDoubleStackSlot()) {
+ __ cmpq(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ } else {
+ __ cmpq(lhs_reg, rhs.AsRegister<CpuRegister>());
+ }
+}
+
+Address CodeGeneratorX86_64::ArrayAddress(CpuRegister obj,
+ Location index,
+ ScaleFactor scale,
+ uint32_t data_offset) {
+ return index.IsConstant() ?
+ Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) :
+ Address(obj, index.AsRegister<CpuRegister>(), scale, data_offset);
+}
+
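GenerateLongCompare cannot simply mirror the int helper with a wider immediate: x86-64 cmpq only encodes a sign-extended 32-bit immediate, which is why it defers to Compare64BitValue (that helper materializes out-of-range constants in a scratch register first). A standalone model of the decision, under that assumption:

#include <cstdint>
#include <limits>

// True if `value` fits the sign-extended imm32 form of cmpq.
inline bool FitsCmpqImmediate(int64_t value) {
  return value >= std::numeric_limits<int32_t>::min() &&
         value <= std::numeric_limits<int32_t>::max();
}
// FitsCmpqImmediate(-1)         -> true:  cmpq reg, $-1
// FitsCmpqImmediate(1LL << 40)  -> false: movq scratch, $imm64; cmpq reg, scratch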
void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
DCHECK(dest.IsDoubleStackSlot());
if (IsInt<32>(value)) {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d93908343d..7108676b8e 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -510,6 +510,18 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void Compare32BitValue(CpuRegister dest, int32_t value);
void Compare64BitValue(CpuRegister dest, int64_t value);
+ // Compare int values. Supports only register locations for `lhs`.
+ void GenerateIntCompare(Location lhs, Location rhs);
+
+ // Compare long values. Supports only register locations for `lhs`.
+ void GenerateLongCompare(Location lhs, Location rhs);
+
+ // Construct address for array access.
+ static Address ArrayAddress(CpuRegister obj,
+ Location index,
+ ScaleFactor scale,
+ uint32_t data_offset);
+
Address LiteralCaseTable(HPackedSwitch* switch_instr);
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
@@ -533,9 +545,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
}
}
- void GenerateNop();
- void GenerateImplicitNullCheck(HNullCheck* instruction);
- void GenerateExplicitNullCheck(HNullCheck* instruction);
+ void GenerateNop() OVERRIDE;
+ void GenerateImplicitNullCheck(HNullCheck* instruction) OVERRIDE;
+ void GenerateExplicitNullCheck(HNullCheck* instruction) OVERRIDE;
// When we don't know the proper offset for the value, we use kDummy32BitOffset.
// We will fix this up in the linker later to have the right value.
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e8d6bae1b5..f19faa324c 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -1039,17 +1039,7 @@ TEST_F(CodegenTest, ComparisonsInt) {
}
TEST_F(CodegenTest, ComparisonsLong) {
- // TODO: make MIPS work for long
- if (kRuntimeISA == kMips || kRuntimeISA == kMips64) {
- return;
- }
-
for (CodegenTargetConfig target_config : GetTargetConfigs()) {
- if ((target_config.GetInstructionSet() == kMips) ||
- (target_config.GetInstructionSet() == kMips64)) {
- continue;
- }
-
for (int64_t i = -1; i <= 1; i++) {
for (int64_t j = -1; j <= 1; j++) {
for (int cond = kCondFirst; cond <= kCondLast; cond++) {
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index eda0971ecc..776a483d43 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -273,9 +273,9 @@ inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
// only SP/WSP and XZR/WZR codes are different between art and vixl.
// Note: This function is only used for debug checks.
inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
- size_t num_core,
- uint32_t art_fpu_registers,
- size_t num_fpu) {
+ size_t num_core,
+ uint32_t art_fpu_registers,
+ size_t num_fpu) {
// The register masks won't work if the number of registers is larger than 32.
DCHECK_GE(sizeof(art_core_registers) * 8, num_core);
DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu);
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index e1bde7c737..aa3f26809a 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -16,7 +16,7 @@
#include "dead_code_elimination.h"
-#include "utils/array_ref.h"
+#include "base/array_ref.h"
#include "base/bit_vector-inl.h"
#include "ssa_phi_elimination.h"
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 129c2a94b5..c501ccf80f 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -714,10 +714,12 @@ void HInductionVarAnalysis::VisitTripCount(HLoopInformation* loop,
case kCondGE: op = kGE; break;
default: LOG(FATAL) << "CONDITION UNREACHABLE";
}
+ // Associate the trip count with the loop's control instruction rather than the
+ // condition (even though the condition is its use): the former provides a convenient use-free placeholder.
+ HInstruction* control = loop->GetHeader()->GetLastInstruction();
InductionInfo* taken_test = CreateInvariantOp(op, lower_expr, upper_expr);
- AssignInfo(loop,
- loop->GetHeader()->GetLastInstruction(),
- CreateTripCount(tcKind, trip_count, taken_test, type));
+ DCHECK(control->IsIf());
+ AssignInfo(loop, control, CreateTripCount(tcKind, trip_count, taken_test, type));
}
bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
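The DCHECK above pins down the invariant the rest of this change builds on: after loop rotation, the header's last instruction is the HIf that controls the loop, it produces no value, and therefore has no uses that could already carry induction information. Consumers can then look the trip count up uniformly; a hedged fragment of the lookup side (LookupInfo as exercised in the tests below):

// Given a loop, fetch its trip-count info, if the analysis computed one.
HInstruction* control = loop->GetHeader()->GetLastInstruction();
DCHECK(control->IsIf());  // guaranteed by the assignment side above
HInductionVarAnalysis::InductionInfo* trip = iva->LookupInfo(loop, control);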
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 580d24b74b..292bc4e06e 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -157,6 +157,13 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
iva_->LookupInfo(loop_body_[d]->GetLoopInformation(), instruction));
}
+ // Returns the trip count induction information of the loop at depth d.
+ std::string GetTripCount(int d) {
+ HInstruction* control = loop_header_[d]->GetLastInstruction();
+ DCHECK(control->IsIf());
+ return GetInductionInfo(control, d);
+ }
+
// Returns true if instructions have identical induction.
bool HaveSameInduction(HInstruction* instruction1, HInstruction* instruction2) {
return HInductionVarAnalysis::InductionEqual(
@@ -239,8 +246,7 @@ TEST_F(InductionVarAnalysisTest, FindBasicInduction) {
EXPECT_FALSE(HaveSameInduction(store->InputAt(1), increment_[0]));
// Trip-count.
- EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, FindDerivedInduction) {
@@ -579,8 +585,7 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
}
EXPECT_STREQ("((1) * i + (1)):PrimInt", GetInductionInfo(increment_[d], d).c_str());
// Trip-count.
- EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
- GetInductionInfo(loop_header_[d]->GetLastInstruction(), d).c_str());
+ EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(d).c_str());
}
}
@@ -607,8 +612,7 @@ TEST_F(InductionVarAnalysisTest, ByteInductionIntLoopControl) {
EXPECT_FALSE(HaveSameInduction(store1->InputAt(1), store2->InputAt(1)));
// Trip-count.
- EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("((100) (TC-loop) ((0) < (100)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, ByteLoopControl1) {
@@ -626,8 +630,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl1) {
EXPECT_STREQ("((1) * i + ((-128) + (1))):PrimByte", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count.
- EXPECT_STREQ("(((127) - (-128)) (TC-loop) ((-128) < (127)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("(((127) - (-128)) (TC-loop) ((-128) < (127)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, ByteLoopControl2) {
@@ -645,7 +648,7 @@ TEST_F(InductionVarAnalysisTest, ByteLoopControl2) {
EXPECT_STREQ("((1) * i + ((-128) + (1))):PrimByte", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count undefined.
- EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, ShortLoopControl1) {
@@ -664,8 +667,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl1) {
EXPECT_STREQ("((1) * i + ((-32768) + (1))):PrimShort",
GetInductionInfo(increment_[0], 0).c_str());
// Trip-count.
- EXPECT_STREQ("(((32767) - (-32768)) (TC-loop) ((-32768) < (32767)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("(((32767) - (-32768)) (TC-loop) ((-32768) < (32767)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, ShortLoopControl2) {
@@ -684,7 +686,7 @@ TEST_F(InductionVarAnalysisTest, ShortLoopControl2) {
EXPECT_STREQ("((1) * i + ((-32768) + (1))):PrimShort",
GetInductionInfo(increment_[0], 0).c_str());
// Trip-count undefined.
- EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, CharLoopControl1) {
@@ -701,8 +703,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl1) {
EXPECT_STREQ("((1) * i + (1)):PrimChar", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count.
- EXPECT_STREQ("((65535) (TC-loop) ((0) < (65535)))",
- GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("((65535) (TC-loop) ((0) < (65535)))", GetTripCount(0).c_str());
}
TEST_F(InductionVarAnalysisTest, CharLoopControl2) {
@@ -719,7 +720,7 @@ TEST_F(InductionVarAnalysisTest, CharLoopControl2) {
EXPECT_STREQ("((1) * i + (1)):PrimChar", GetInductionInfo(increment_[0], 0).c_str());
// Trip-count undefined.
- EXPECT_STREQ("", GetInductionInfo(loop_header_[0]->GetLastInstruction(), 0).c_str());
+ EXPECT_STREQ("", GetTripCount(0).c_str());
}
} // namespace art
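A hedged reading of the trip-count strings checked above, inferred from the
CreateTripCount()/CreateInvariantOp() calls in the analysis (the exact printer is not
shown in this diff):

    ((100) (TC-loop) ((0) < (100)))
     \___/ \_______/ \____________/
     trip   kind:     taken-test
     count  loop-     (lower < upper)
            based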
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index 5e587e0810..cd8b7c7960 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -106,6 +106,12 @@ static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
return instruction;
}
+/** Helper method to obtain the loop's control instruction. */
+static HInstruction* GetLoopControl(HLoopInformation* loop) {
+ DCHECK(loop != nullptr);
+ return loop->GetHeader()->GetLastInstruction();
+}
+
//
// Public class methods.
//
@@ -143,42 +149,134 @@ bool InductionVarRange::GetInductionRange(HInstruction* context,
// Find range.
chase_hint_ = chase_hint;
bool in_body = context->GetBlock() != loop->GetHeader();
+ int64_t stride_value = 0;
*min_val = GetVal(info, trip, in_body, /* is_min */ true);
*max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false));
- *needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip);
+ *needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip);
return true;
}
-bool InductionVarRange::CanGenerateCode(HInstruction* context,
- HInstruction* instruction,
- /*out*/bool* needs_finite_test,
- /*out*/bool* needs_taken_test) {
+bool InductionVarRange::CanGenerateRange(HInstruction* context,
+ HInstruction* instruction,
+ /*out*/bool* needs_finite_test,
+ /*out*/bool* needs_taken_test) {
+ bool is_last_value = false;
+ int64_t stride_value = 0;
return GenerateCode(context,
instruction,
- nullptr, nullptr, nullptr, nullptr, nullptr, // nothing generated yet
+ is_last_value,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr, // nothing generated yet
+ &stride_value,
needs_finite_test,
- needs_taken_test);
-}
-
-void InductionVarRange::GenerateRangeCode(HInstruction* context,
- HInstruction* instruction,
- HGraph* graph,
- HBasicBlock* block,
- /*out*/HInstruction** lower,
- /*out*/HInstruction** upper) {
+ needs_taken_test)
+ && (stride_value == -1 ||
+ stride_value == 0 ||
+ stride_value == 1); // avoid wrap-around anomalies.
+}
+
+void InductionVarRange::GenerateRange(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/HInstruction** lower,
+ /*out*/HInstruction** upper) {
+ bool is_last_value = false;
+ int64_t stride_value = 0;
bool b1, b2; // unused
- if (!GenerateCode(context, instruction, graph, block, lower, upper, nullptr, &b1, &b2)) {
- LOG(FATAL) << "Failed precondition: GenerateCode()";
- }
-}
-
-void InductionVarRange::GenerateTakenTest(HInstruction* context,
- HGraph* graph,
- HBasicBlock* block,
- /*out*/HInstruction** taken_test) {
+ if (!GenerateCode(context,
+ instruction,
+ is_last_value,
+ graph,
+ block,
+ lower,
+ upper,
+ nullptr,
+ &stride_value,
+ &b1,
+ &b2)) {
+ LOG(FATAL) << "Failed precondition: CanGenerateRange()";
+ }
+}
+
+HInstruction* InductionVarRange::GenerateTakenTest(HInstruction* context,
+ HGraph* graph,
+ HBasicBlock* block) {
+ HInstruction* taken_test = nullptr;
+ bool is_last_value = false;
+ int64_t stride_value = 0;
+ bool b1, b2; // unused
+ if (!GenerateCode(context,
+ context,
+ is_last_value,
+ graph,
+ block,
+ nullptr,
+ nullptr,
+ &taken_test,
+ &stride_value,
+ &b1,
+ &b2)) {
+ LOG(FATAL) << "Failed precondition: CanGenerateRange()";
+ }
+ return taken_test;
+}
+
+bool InductionVarRange::CanGenerateLastValue(HInstruction* instruction) {
+ bool is_last_value = true;
+ int64_t stride_value = 0;
+ bool needs_finite_test = false;
+ bool needs_taken_test = false;
+ return GenerateCode(instruction,
+ instruction,
+ is_last_value,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr, // nothing generated yet
+ &stride_value,
+ &needs_finite_test,
+ &needs_taken_test)
+ && !needs_finite_test && !needs_taken_test;
+}
+
+HInstruction* InductionVarRange::GenerateLastValue(HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block) {
+ HInstruction* last_value = nullptr;
+ bool is_last_value = true;
+ int64_t stride_value = 0;
bool b1, b2; // unused
- if (!GenerateCode(context, context, graph, block, nullptr, nullptr, taken_test, &b1, &b2)) {
- LOG(FATAL) << "Failed precondition: GenerateCode()";
+ if (!GenerateCode(instruction,
+ instruction,
+ is_last_value,
+ graph,
+ block,
+ &last_value,
+ &last_value,
+ nullptr,
+ &stride_value,
+ &b1,
+ &b2)) {
+ LOG(FATAL) << "Failed precondition: CanGenerateLastValue()";
+ }
+ return last_value;
+}
+
+void InductionVarRange::Replace(HInstruction* instruction,
+ HInstruction* fetch,
+ HInstruction* replacement) {
+ for (HLoopInformation* lp = instruction->GetBlock()->GetLoopInformation(); // closest enveloping loop
+ lp != nullptr;
+ lp = lp->GetPreHeader()->GetLoopInformation()) {
+ // Update instruction's information.
+ ReplaceInduction(induction_analysis_->LookupInfo(lp, instruction), fetch, replacement);
+ // Update loop's trip-count information.
+ ReplaceInduction(induction_analysis_->LookupInfo(lp, GetLoopControl(lp)), fetch, replacement);
}
}
@@ -221,13 +319,13 @@ bool InductionVarRange::HasInductionInfo(
/*out*/ HLoopInformation** loop,
/*out*/ HInductionVarAnalysis::InductionInfo** info,
/*out*/ HInductionVarAnalysis::InductionInfo** trip) const {
- HLoopInformation* l = context->GetBlock()->GetLoopInformation(); // closest enveloping loop
- if (l != nullptr) {
- HInductionVarAnalysis::InductionInfo* i = induction_analysis_->LookupInfo(l, instruction);
+ HLoopInformation* lp = context->GetBlock()->GetLoopInformation(); // closest enveloping loop
+ if (lp != nullptr) {
+ HInductionVarAnalysis::InductionInfo* i = induction_analysis_->LookupInfo(lp, instruction);
if (i != nullptr) {
- *loop = l;
+ *loop = lp;
*info = i;
- *trip = induction_analysis_->LookupInfo(l, l->GetHeader()->GetLastInstruction());
+ *trip = induction_analysis_->LookupInfo(lp, GetLoopControl(lp));
return true;
}
}
@@ -260,12 +358,13 @@ bool InductionVarRange::HasFetchInLoop(HInductionVarAnalysis::InductionInfo* inf
return false;
}
-bool InductionVarRange::NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) const {
+bool InductionVarRange::NeedsTripCount(HInductionVarAnalysis::InductionInfo* info,
+ int64_t* stride_value) const {
if (info != nullptr) {
if (info->induction_class == HInductionVarAnalysis::kLinear) {
- return true;
+ return IsConstant(info->op_a, kExact, stride_value);
} else if (info->induction_class == HInductionVarAnalysis::kWrapAround) {
- return NeedsTripCount(info->op_b);
+ return NeedsTripCount(info->op_b, stride_value);
}
}
return false;
@@ -618,11 +717,13 @@ InductionVarRange::Value InductionVarRange::MergeVal(Value v1, Value v2, bool is
bool InductionVarRange::GenerateCode(HInstruction* context,
HInstruction* instruction,
+ bool is_last_value,
HGraph* graph,
HBasicBlock* block,
/*out*/HInstruction** lower,
/*out*/HInstruction** upper,
/*out*/HInstruction** taken_test,
+ /*out*/int64_t* stride_value,
/*out*/bool* needs_finite_test,
/*out*/bool* needs_taken_test) const {
HLoopInformation* loop = nullptr;
@@ -637,8 +738,19 @@ bool InductionVarRange::GenerateCode(HInstruction* context,
// code does not use the trip-count explicitly (since there could be an implicit relation
// between e.g. an invariant subscript and a not-taken condition).
bool in_body = context->GetBlock() != loop->GetHeader();
- *needs_finite_test = NeedsTripCount(info) && IsUnsafeTripCount(trip);
+ *stride_value = 0;
+ *needs_finite_test = NeedsTripCount(info, stride_value) && IsUnsafeTripCount(trip);
*needs_taken_test = IsBodyTripCount(trip);
+ // Handle last value request.
+ if (is_last_value) {
+ if (info->induction_class != HInductionVarAnalysis::kLinear) {
+ return false;
+ } else if (*stride_value > 0) {
+ lower = nullptr;
+ } else {
+ upper = nullptr;
+ }
+ }
// Code generation for taken test: generate the code when requested or otherwise analyze
// if code generation is feasible when taken test is needed.
if (taken_test != nullptr) {
@@ -666,6 +778,10 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
bool in_body,
bool is_min) const {
if (info != nullptr) {
+ // During code generation, if the result is not needed (nullptr), simply report success.
+ if (graph != nullptr && result == nullptr) {
+ return true;
+ }
// Verify type safety.
Primitive::Type type = Primitive::kPrimInt;
if (info->type != type) {
@@ -757,25 +873,29 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
}
break;
case HInductionVarAnalysis::kLinear: {
- // Linear induction a * i + b, for normalized 0 <= i < TC. Restrict to unit stride only
- // to avoid arithmetic wrap-around situations that are hard to guard against.
+ // Linear induction a * i + b, for normalized 0 <= i < TC. For ranges, this should
+ // be restricted to a unit stride to avoid arithmetic wrap-around situations that
+ // are harder to guard against. For a last value, requesting min/max based on any
+ // stride yields the right value.
int64_t stride_value = 0;
if (IsConstant(info->op_a, kExact, &stride_value)) {
- if (stride_value == 1 || stride_value == -1) {
- const bool is_min_a = stride_value == 1 ? is_min : !is_min;
- if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) &&
- GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
- if (graph != nullptr) {
- HInstruction* oper;
- if (stride_value == 1) {
- oper = new (graph->GetArena()) HAdd(type, opa, opb);
- } else {
- oper = new (graph->GetArena()) HSub(type, opb, opa);
- }
- *result = Insert(block, oper);
+ const bool is_min_a = stride_value >= 0 ? is_min : !is_min;
+ if (GenerateCode(trip, trip, graph, block, &opa, in_body, is_min_a) &&
+ GenerateCode(info->op_b, trip, graph, block, &opb, in_body, is_min)) {
+ if (graph != nullptr) {
+ HInstruction* oper;
+ if (stride_value == 1) {
+ oper = new (graph->GetArena()) HAdd(type, opa, opb);
+ } else if (stride_value == -1) {
+ oper = new (graph->GetArena()) HSub(type, opb, opa);
+ } else {
+ HInstruction* mul = new (graph->GetArena()) HMul(
+ type, graph->GetIntConstant(stride_value), opa);
+ oper = new (graph->GetArena()) HAdd(type, Insert(block, mul), opb);
}
- return true;
+ *result = Insert(block, oper);
}
+ return true;
}
}
break;
@@ -800,4 +920,18 @@ bool InductionVarRange::GenerateCode(HInductionVarAnalysis::InductionInfo* info,
return false;
}
+void InductionVarRange::ReplaceInduction(HInductionVarAnalysis::InductionInfo* info,
+ HInstruction* fetch,
+ HInstruction* replacement) {
+ if (info != nullptr) {
+ if (info->induction_class == HInductionVarAnalysis::kInvariant &&
+ info->operation == HInductionVarAnalysis::kFetch &&
+ info->fetch == fetch) {
+ info->fetch = replacement;
+ }
+ ReplaceInduction(info->op_a, fetch, replacement);
+ ReplaceInduction(info->op_b, fetch, replacement);
+ }
+}
+
} // namespace art
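A minimal sketch of how a client pass might drive the renamed and new entry points (the
surrounding range, context, instruction, graph and block are assumed to exist; this is
illustrative, not code from this change):

    // Range generation, guarded by its precondition.
    bool needs_finite_test = false;
    bool needs_taken_test = false;
    if (range.CanGenerateRange(context, instruction, &needs_finite_test, &needs_taken_test)) {
      HInstruction* lower = nullptr;
      HInstruction* upper = nullptr;
      range.GenerateRange(context, instruction, graph, block, &lower, &upper);
      if (needs_taken_test) {
        HInstruction* taken = range.GenerateTakenTest(context, graph, block);
        // ... guard uses of lower/upper with the taken-test ...
      }
    }
    // Last value, e.g. to replace a loop phi after the loop.
    if (range.CanGenerateLastValue(instruction)) {
      HInstruction* last = range.GenerateLastValue(instruction, graph, block);
      // ... use 'last' instead of the instruction outside the loop ...
    }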
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index 00aaa167f8..63850b34b8 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -76,10 +76,10 @@ class InductionVarRange {
* and needs-taken-test flags denote whether an additional finite-test and/or taken-test
* are needed to protect the range evaluation inside its loop.
*/
- bool CanGenerateCode(HInstruction* context,
- HInstruction* instruction,
- /*out*/ bool* needs_finite_test,
- /*out*/ bool* needs_taken_test);
+ bool CanGenerateRange(HInstruction* context,
+ HInstruction* instruction,
+ /*out*/ bool* needs_finite_test,
+ /*out*/ bool* needs_taken_test);
/**
* Generates the actual code in the HIR for the lower and upper bound expressions on the
@@ -94,25 +94,42 @@ class InductionVarRange {
* lower: add x, 0
* upper: add x, 5
*
- * Precondition: CanGenerateCode() returns true.
+ * Precondition: CanGenerateRange() returns true.
*/
- void GenerateRangeCode(HInstruction* context,
- HInstruction* instruction,
- HGraph* graph,
- HBasicBlock* block,
- /*out*/ HInstruction** lower,
- /*out*/ HInstruction** upper);
+ void GenerateRange(HInstruction* context,
+ HInstruction* instruction,
+ HGraph* graph,
+ HBasicBlock* block,
+ /*out*/ HInstruction** lower,
+ /*out*/ HInstruction** upper);
/**
* Generates explicit taken-test for the loop in the given context. Code is generated in
- * given block and graph. The taken-test is returned in parameter test.
+ * the given block and graph. Returns the generated taken-test.
*
- * Precondition: CanGenerateCode() returns true and needs_taken_test is set.
+ * Precondition: CanGenerateRange() returns true and needs_taken_test is set.
*/
- void GenerateTakenTest(HInstruction* context,
- HGraph* graph,
- HBasicBlock* block,
- /*out*/ HInstruction** taken_test);
+ HInstruction* GenerateTakenTest(HInstruction* context, HGraph* graph, HBasicBlock* block);
+
+ /**
+ * Returns true if induction analysis is able to generate code for the last value of
+ * the given instruction inside the closest enveloping loop.
+ */
+ bool CanGenerateLastValue(HInstruction* instruction);
+
+ /**
+ * Generates the last value of the given instruction in the closest enveloping loop.
+ * Code is generated in the given block and graph. Returns the generated last value.
+ *
+ * Precondition: CanGenerateLastValue() returns true.
+ */
+ HInstruction* GenerateLastValue(HInstruction* instruction, HGraph* graph, HBasicBlock* block);
+
+ /**
+ * Updates all matching fetches with the given replacement in all induction information
+ * that is associated with the given instruction.
+ */
+ void Replace(HInstruction* instruction, HInstruction* fetch, HInstruction* replacement);
private:
/*
@@ -140,7 +157,8 @@ class InductionVarRange {
/*out*/ HInductionVarAnalysis::InductionInfo** trip) const;
bool HasFetchInLoop(HInductionVarAnalysis::InductionInfo* info) const;
- bool NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) const;
+ bool NeedsTripCount(HInductionVarAnalysis::InductionInfo* info,
+ /*out*/ int64_t* stride_value) const;
bool IsBodyTripCount(HInductionVarAnalysis::InductionInfo* trip) const;
bool IsUnsafeTripCount(HInductionVarAnalysis::InductionInfo* trip) const;
bool IsWellBehavedTripCount(HInductionVarAnalysis::InductionInfo* trip) const;
@@ -186,17 +204,19 @@ class InductionVarRange {
Value MergeVal(Value v1, Value v2, bool is_min) const;
/**
- * Generates code for lower/upper/taken-test in the HIR. Returns true on success.
- * With values nullptr, the method can be used to determine if code generation
+ * Generates code for lower/upper/taken-test or last value in the HIR. Returns true on
+ * success. With nullptr out-parameters, the method can be used to determine whether
+ * code generation would succeed without generating any actual code yet.
*/
bool GenerateCode(HInstruction* context,
HInstruction* instruction,
+ bool is_last_val,
HGraph* graph,
HBasicBlock* block,
/*out*/ HInstruction** lower,
/*out*/ HInstruction** upper,
/*out*/ HInstruction** taken_test,
+ /*out*/ int64_t* stride_value,
/*out*/ bool* needs_finite_test,
/*out*/ bool* needs_taken_test) const;
@@ -208,6 +228,10 @@ class InductionVarRange {
bool in_body,
bool is_min) const;
+ void ReplaceInduction(HInductionVarAnalysis::InductionInfo* info,
+ HInstruction* fetch,
+ HInstruction* replacement);
+
/** Results of prior induction variable analysis. */
HInductionVarAnalysis* induction_analysis_;
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index 4ea170f659..8bbdd4acb7 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -75,34 +75,34 @@ class InductionVarRangeTest : public CommonCompilerTest {
// Control flow.
loop_preheader_ = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(loop_preheader_);
- HBasicBlock* loop_header = new (&allocator_) HBasicBlock(graph_);
- graph_->AddBlock(loop_header);
- HBasicBlock* loop_body = new (&allocator_) HBasicBlock(graph_);
- graph_->AddBlock(loop_body);
+ loop_header_ = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_header_);
+ loop_body_ = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(loop_body_);
HBasicBlock* return_block = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(return_block);
entry_block_->AddSuccessor(loop_preheader_);
- loop_preheader_->AddSuccessor(loop_header);
- loop_header->AddSuccessor(loop_body);
- loop_header->AddSuccessor(return_block);
- loop_body->AddSuccessor(loop_header);
+ loop_preheader_->AddSuccessor(loop_header_);
+ loop_header_->AddSuccessor(loop_body_);
+ loop_header_->AddSuccessor(return_block);
+ loop_body_->AddSuccessor(loop_header_);
return_block->AddSuccessor(exit_block_);
// Instructions.
loop_preheader_->AddInstruction(new (&allocator_) HGoto());
HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
- loop_header->AddPhi(phi);
+ loop_header_->AddPhi(phi);
phi->AddInput(graph_->GetIntConstant(lower)); // i = l
if (stride > 0) {
condition_ = new (&allocator_) HLessThan(phi, upper); // i < u
} else {
condition_ = new (&allocator_) HGreaterThan(phi, upper); // i > u
}
- loop_header->AddInstruction(condition_);
- loop_header->AddInstruction(new (&allocator_) HIf(condition_));
+ loop_header_->AddInstruction(condition_);
+ loop_header_->AddInstruction(new (&allocator_) HIf(condition_));
increment_ = new (&allocator_) HAdd(Primitive::kPrimInt, phi, graph_->GetIntConstant(stride));
- loop_body->AddInstruction(increment_); // i += s
+ loop_body_->AddInstruction(increment_); // i += s
phi->AddInput(increment_);
- loop_body->AddInstruction(new (&allocator_) HGoto());
+ loop_body_->AddInstruction(new (&allocator_) HGoto());
return_block->AddInstruction(new (&allocator_) HReturnVoid());
exit_block_->AddInstruction(new (&allocator_) HExit());
}
@@ -192,7 +192,8 @@ class InductionVarRangeTest : public CommonCompilerTest {
//
bool NeedsTripCount(HInductionVarAnalysis::InductionInfo* info) {
- return range_.NeedsTripCount(info);
+ int64_t s = 0;
+ return range_.NeedsTripCount(info, &s);
}
bool IsBodyTripCount(HInductionVarAnalysis::InductionInfo* trip) {
@@ -251,6 +252,8 @@ class InductionVarRangeTest : public CommonCompilerTest {
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
HBasicBlock* loop_preheader_;
+ HBasicBlock* loop_header_;
+ HBasicBlock* loop_body_;
HInductionVarAnalysis* iva_;
InductionVarRange range_;
@@ -600,15 +603,19 @@ TEST_F(InductionVarRangeTest, ConstantTripCountUp) {
Value v1, v2;
bool needs_finite_test = true;
+ bool needs_taken_test = true;
+
+ HInstruction* phi = condition_->InputAt(0);
+ HInstruction* exit = exit_block_->GetLastInstruction();
// In context of header: known.
- range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(condition_, phi, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(1000), v2);
// In context of loop-body: known.
- range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, phi, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(999), v2);
@@ -616,6 +623,20 @@ TEST_F(InductionVarRangeTest, ConstantTripCountUp) {
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(1), v1);
ExpectEqual(Value(1000), v2);
+
+ // Induction vs. no-induction.
+ EXPECT_TRUE(range_.CanGenerateRange(increment_, phi, &needs_finite_test, &needs_taken_test));
+ EXPECT_TRUE(range_.CanGenerateLastValue(phi));
+ EXPECT_FALSE(range_.CanGenerateRange(exit, exit, &needs_finite_test, &needs_taken_test));
+ EXPECT_FALSE(range_.CanGenerateLastValue(exit));
+
+ // Last value (unsimplified).
+ HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_);
+ ASSERT_TRUE(last->IsAdd());
+ ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
+ EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue());
+ ASSERT_TRUE(last->InputAt(1)->IsIntConstant());
+ EXPECT_EQ(0, last->InputAt(1)->AsIntConstant()->GetValue());
}
TEST_F(InductionVarRangeTest, ConstantTripCountDown) {
@@ -624,15 +645,19 @@ TEST_F(InductionVarRangeTest, ConstantTripCountDown) {
Value v1, v2;
bool needs_finite_test = true;
+ bool needs_taken_test = true;
+
+ HInstruction* phi = condition_->InputAt(0);
+ HInstruction* exit = exit_block_->GetLastInstruction();
// In context of header: known.
- range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(condition_, phi, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(1000), v2);
// In context of loop-body: known.
- range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, phi, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(1), v1);
ExpectEqual(Value(1000), v2);
@@ -640,6 +665,25 @@ TEST_F(InductionVarRangeTest, ConstantTripCountDown) {
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(999), v2);
+
+ // Induction vs. no-induction.
+ EXPECT_TRUE(range_.CanGenerateRange(increment_, phi, &needs_finite_test, &needs_taken_test));
+ EXPECT_TRUE(range_.CanGenerateLastValue(phi));
+ EXPECT_FALSE(range_.CanGenerateRange(exit, exit, &needs_finite_test, &needs_taken_test));
+ EXPECT_FALSE(range_.CanGenerateLastValue(exit));
+
+ // Last value (unsimplified).
+ HInstruction* last = range_.GenerateLastValue(phi, graph_, loop_preheader_);
+ ASSERT_TRUE(last->IsSub());
+ ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
+ EXPECT_EQ(1000, last->InputAt(0)->AsIntConstant()->GetValue());
+ ASSERT_TRUE(last->InputAt(1)->IsNeg());
+ last = last->InputAt(1)->InputAt(0);
+ ASSERT_TRUE(last->IsSub());
+ ASSERT_TRUE(last->InputAt(0)->IsIntConstant());
+ EXPECT_EQ(0, last->InputAt(0)->AsIntConstant()->GetValue());
+ ASSERT_TRUE(last->InputAt(1)->IsIntConstant());
+ EXPECT_EQ(1000, last->InputAt(1)->AsIntConstant()->GetValue());
}
TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
@@ -650,14 +694,16 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
bool needs_finite_test = true;
bool needs_taken_test = true;
+ HInstruction* phi = condition_->InputAt(0);
+
// In context of header: upper unknown.
- range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(condition_, phi, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(), v2);
// In context of loop-body: known.
- range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, phi, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(0), v1);
ExpectEqual(Value(x_, 1, -1), v2);
@@ -668,19 +714,15 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
HInstruction* lower = nullptr;
HInstruction* upper = nullptr;
- HInstruction* taken = nullptr;
// Can generate code in context of loop-body only.
- EXPECT_FALSE(range_.CanGenerateCode(
- condition_, condition_->InputAt(0), &needs_finite_test, &needs_taken_test));
- ASSERT_TRUE(range_.CanGenerateCode(
- increment_, condition_->InputAt(0), &needs_finite_test, &needs_taken_test));
+ EXPECT_FALSE(range_.CanGenerateRange(condition_, phi, &needs_finite_test, &needs_taken_test));
+ ASSERT_TRUE(range_.CanGenerateRange(increment_, phi, &needs_finite_test, &needs_taken_test));
EXPECT_FALSE(needs_finite_test);
EXPECT_TRUE(needs_taken_test);
- // Generates code.
- range_.GenerateRangeCode(
- increment_, condition_->InputAt(0), graph_, loop_preheader_, &lower, &upper);
+ // Generates code (unsimplified).
+ range_.GenerateRange(increment_, phi, graph_, loop_preheader_, &lower, &upper);
// Verify lower is 0+0.
ASSERT_TRUE(lower != nullptr);
@@ -701,12 +743,19 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountUp) {
EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
// Verify taken-test is 0<V.
- range_.GenerateTakenTest(increment_, graph_, loop_preheader_, &taken);
+ HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_);
ASSERT_TRUE(taken != nullptr);
ASSERT_TRUE(taken->IsLessThan());
ASSERT_TRUE(taken->InputAt(0)->IsIntConstant());
EXPECT_EQ(0, taken->InputAt(0)->AsIntConstant()->GetValue());
EXPECT_TRUE(taken->InputAt(1)->IsParameterValue());
+
+ // Replacement.
+ range_.Replace(loop_header_->GetLastInstruction(), x_, y_);
+ range_.GetInductionRange(increment_, increment_, x_, &v1, &v2, &needs_finite_test);
+ EXPECT_FALSE(needs_finite_test);
+ ExpectEqual(Value(1), v1);
+ ExpectEqual(Value(y_, 1, 0), v2);
}
TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
@@ -717,14 +766,16 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
bool needs_finite_test = true;
bool needs_taken_test = true;
+ HInstruction* phi = condition_->InputAt(0);
+
// In context of header: lower unknown.
- range_.GetInductionRange(condition_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(condition_, phi, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(), v1);
ExpectEqual(Value(1000), v2);
// In context of loop-body: known.
- range_.GetInductionRange(increment_, condition_->InputAt(0), x_, &v1, &v2, &needs_finite_test);
+ range_.GetInductionRange(increment_, phi, x_, &v1, &v2, &needs_finite_test);
EXPECT_FALSE(needs_finite_test);
ExpectEqual(Value(x_, 1, 1), v1);
ExpectEqual(Value(1000), v2);
@@ -735,19 +786,15 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
HInstruction* lower = nullptr;
HInstruction* upper = nullptr;
- HInstruction* taken = nullptr;
// Can generate code in context of loop-body only.
- EXPECT_FALSE(range_.CanGenerateCode(
- condition_, condition_->InputAt(0), &needs_finite_test, &needs_taken_test));
- ASSERT_TRUE(range_.CanGenerateCode(
- increment_, condition_->InputAt(0), &needs_finite_test, &needs_taken_test));
+ EXPECT_FALSE(range_.CanGenerateRange(condition_, phi, &needs_finite_test, &needs_taken_test));
+ ASSERT_TRUE(range_.CanGenerateRange(increment_, phi, &needs_finite_test, &needs_taken_test));
EXPECT_FALSE(needs_finite_test);
EXPECT_TRUE(needs_taken_test);
- // Generates code.
- range_.GenerateRangeCode(
- increment_, condition_->InputAt(0), graph_, loop_preheader_, &lower, &upper);
+ // Generates code (unsimplified).
+ range_.GenerateRange(increment_, phi, graph_, loop_preheader_, &lower, &upper);
// Verify lower is 1000-((1000-V)-1).
ASSERT_TRUE(lower != nullptr);
@@ -773,12 +820,19 @@ TEST_F(InductionVarRangeTest, SymbolicTripCountDown) {
EXPECT_EQ(0, upper->InputAt(1)->AsIntConstant()->GetValue());
// Verify taken-test is 1000>V.
- range_.GenerateTakenTest(increment_, graph_, loop_preheader_, &taken);
+ HInstruction* taken = range_.GenerateTakenTest(increment_, graph_, loop_preheader_);
ASSERT_TRUE(taken != nullptr);
ASSERT_TRUE(taken->IsGreaterThan());
ASSERT_TRUE(taken->InputAt(0)->IsIntConstant());
EXPECT_EQ(1000, taken->InputAt(0)->AsIntConstant()->GetValue());
EXPECT_TRUE(taken->InputAt(1)->IsParameterValue());
+
+ // Replacement.
+ range_.Replace(loop_header_->GetLastInstruction(), x_, y_);
+ range_.GetInductionRange(increment_, increment_, x_, &v1, &v2, &needs_finite_test);
+ EXPECT_FALSE(needs_finite_test);
+ ExpectEqual(Value(y_, 1, 0), v1);
+ ExpectEqual(Value(999), v2);
}
} // namespace art
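As a sanity check on the unsimplified last values asserted above: for the up-counting
loop the generated expression evaluates to 1000 + 0 = 1000, and for the down-counting
loop to 1000 - (-(0 - 1000)) = 1000 - 1000 = 0; in both cases this is exactly the value
the loop phi holds on exit.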
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index 7905104ed4..7fe1067aa9 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -35,9 +35,9 @@ namespace art {
*/
class InstructionSimplifier : public HOptimization {
public:
- InstructionSimplifier(HGraph* graph,
- OptimizingCompilerStats* stats = nullptr,
- const char* name = kInstructionSimplifierPassName)
+ explicit InstructionSimplifier(HGraph* graph,
+ OptimizingCompilerStats* stats = nullptr,
+ const char* name = kInstructionSimplifierPassName)
: HOptimization(graph, name, stats) {}
static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
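For illustration, the effect of 'explicit' on such single-required-argument constructors
(a generic C++ sketch, not ART code):

    struct Graph {};

    struct Pass {
      explicit Pass(Graph* g, const char* name = "pass") : graph(g), name(name) {}
      Graph* graph;
      const char* name;
    };

    int main() {
      Graph g;
      Pass p(&g);      // OK: explicit construction.
      // Pass q = &g;  // Now a compile error: no implicit Graph* -> Pass conversion.
      return 0;
    }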
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 8f7778fe68..04e063c92e 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -259,7 +259,8 @@ bool TryExtractArrayAccessAddress(HInstruction* access,
HIntConstant* offset = graph->GetIntConstant(data_offset);
HIntermediateAddress* address =
new (arena) HIntermediateAddress(array, offset, kNoDexPc);
- address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
+ // TODO: Is it ok to not have this on the intermediate address?
+ // address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
access->GetBlock()->InsertInstructionBefore(address, access);
access->ReplaceInput(address, 0);
// Both instructions must depend on GC to prevent any instruction that can
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 67640a1ebf..fd2da1004b 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -657,7 +657,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
LocationSummary::kNoCall,
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 082076d79b..ce58657bcd 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -895,7 +895,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke
LocationSummary::kNoCall,
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index be8eb51e42..1d153e2e18 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1857,11 +1857,11 @@ static void GenHighestOneBit(LocationSummary* locations,
if (type == Primitive::kPrimLong) {
__ Dclz(TMP, in);
__ LoadConst64(AT, INT64_C(0x8000000000000000));
- __ Dsrlv(out, AT, TMP);
+ __ Dsrlv(AT, AT, TMP);
} else {
__ Clz(TMP, in);
__ LoadConst32(AT, 0x80000000);
- __ Srlv(out, AT, TMP);
+ __ Srlv(AT, AT, TMP);
}
// For either value of "type", when "in" is zero, "out" should also
// be zero. Without this extra "and" operation, when "in" is zero,
@@ -1869,7 +1869,7 @@ static void GenHighestOneBit(LocationSummary* locations,
// the MIPS logical shift operations "dsrlv", and "srlv" don't use
// the shift amount (TMP) directly; they use either (TMP % 64) or
// (TMP % 32), respectively.
- __ And(out, out, in);
+ __ And(out, AT, in);
}
// int java.lang.Integer.highestOneBit(int)
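A hedged C model of the corrected 32-bit sequence, showing both why the modulo behavior
of srlv makes the zero input work and why the intermediate mask must live in AT rather
than out (presumably because out may be allocated the same register as in, which the
register change above suggests):

    #include <cassert>
    #include <cstdint>

    uint32_t HighestOneBit32(uint32_t in) {
      uint32_t tmp = (in == 0) ? 32 : __builtin_clz(in);  // Clz(TMP, in); GCC/Clang builtin
      uint32_t at = UINT32_C(0x80000000) >> (tmp % 32);   // Srlv: uses shift amount % 32
      return at & in;                                     // And(out, AT, in)
    }

    int main() {
      assert(HighestOneBit32(0) == 0);                    // mask stays 0x80000000; & 0 -> 0
      assert(HighestOneBit32(1) == 1);
      assert(HighestOneBit32(0x00F00000u) == 0x00800000u);
      return 0;
    }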
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index d17f85ec8d..e61aba05b4 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1977,7 +1977,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena,
LocationSummary::kNoCall,
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index f8f30d9015..0f31fabbfb 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2110,7 +2110,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke
LocationSummary::kNoCall,
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
- locations->SetCustomSlowPathCallerSaves(RegisterSet()); // No caller-save registers.
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 1b1b3a79ab..d157509758 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -33,8 +33,8 @@ LocationSummary::LocationSummary(HInstruction* instruction,
output_overlaps_(Location::kOutputOverlap),
stack_mask_(nullptr),
register_mask_(0),
- live_registers_(),
- custom_slow_path_caller_saves_() {
+ live_registers_(RegisterSet::Empty()),
+ custom_slow_path_caller_saves_(RegisterSet::Empty()) {
instruction->SetLocations(this);
if (NeedsSafepoint()) {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 43840422ca..da27928ef2 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -420,7 +420,7 @@ std::ostream& operator<<(std::ostream& os, const Location::Policy& rhs);
class RegisterSet : public ValueObject {
public:
- RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
+ static RegisterSet Empty() { return RegisterSet(); }
void Add(Location loc) {
if (loc.IsRegister()) {
@@ -465,6 +465,8 @@ class RegisterSet : public ValueObject {
}
private:
+ RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
+
uint32_t core_registers_;
uint32_t floating_point_registers_;
};
@@ -488,9 +490,9 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
kCallOnMainOnly
};
- LocationSummary(HInstruction* instruction,
- CallKind call_kind = kNoCall,
- bool intrinsified = false);
+ explicit LocationSummary(HInstruction* instruction,
+ CallKind call_kind = kNoCall,
+ bool intrinsified = false);
void SetInAt(uint32_t at, Location location) {
inputs_[at] = location;
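The named constructor is a small safety idiom: hiding the default constructor forces
call sites to state their intent. A generic sketch of the pattern (not ART code):

    #include <cstdint>

    class Mask {
     public:
      static Mask Empty() { return Mask(); }  // The only way to obtain an empty set.
      void Add(int bit) { bits_ |= (1u << bit); }
      uint32_t bits() const { return bits_; }
     private:
      Mask() : bits_(0) {}                    // No accidental default construction.
      uint32_t bits_;
    };

    // Usage: Mask m = Mask::Empty();  ('Mask m;' no longer compiles.)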
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 8f37236ede..9cfa89b7d0 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -460,6 +460,113 @@ GraphAnalysisResult HGraph::AnalyzeLoops() const {
return kAnalysisSuccess;
}
+static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
+ return first_loop == second_loop;
+}
+
+static bool IsLoop(HLoopInformation* info) {
+ return info != nullptr;
+}
+
+static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) {
+ return (inner != outer)
+ && (inner != nullptr)
+ && (outer != nullptr)
+ && inner->IsIn(*outer);
+}
+
+// Helper method to update the worklist for the linear order.
+static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
+ HLoopInformation* block_loop = block->GetLoopInformation();
+ auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position.
+ for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
+ HBasicBlock* current = *insert_pos;
+ HLoopInformation* current_loop = current->GetLoopInformation();
+ if (InSameLoop(block_loop, current_loop)
+ || !IsLoop(current_loop)
+ || IsInnerLoop(current_loop, block_loop)) {
+ // The block can be processed immediately.
+ break;
+ }
+ }
+ worklist->insert(insert_pos.base(), block);
+}
+
+// Helper method to validate the linear order.
+static bool IsLinearOrderWellFormed(const HGraph& graph) {
+ for (HBasicBlock* header : graph.GetBlocks()) {
+ if (header == nullptr || !header->IsLoopHeader()) {
+ continue;
+ }
+ HLoopInformation* loop = header->GetLoopInformation();
+ size_t num_blocks = loop->GetBlocks().NumSetBits();
+ size_t found_blocks = 0u;
+ for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
+ HBasicBlock* current = it.Current();
+ if (loop->Contains(*current)) {
+ found_blocks++;
+ if (found_blocks == 1u && current != header) {
+ // First block is not the header.
+ return false;
+ } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
+ // Last block is not a back edge.
+ return false;
+ }
+ } else if (found_blocks != 0u && found_blocks != num_blocks) {
+ // Blocks are not adjacent.
+ return false;
+ }
+ }
+ DCHECK_EQ(found_blocks, num_blocks);
+ }
+ return true;
+}
+
+void HGraph::Linearize() {
+ // Create a reverse post ordering with the following properties:
+ // - Blocks in a loop are consecutive,
+ // - The back edge is the last block before the loop exits.
+
+ // (1): Record the number of forward predecessors for each block. This is to
+ // ensure the resulting order is reverse post order. We could use the
+ // current reverse post order in the graph, but it would require making
+ // order queries to a GrowableArray, which is not the best data structure
+ // for it.
+ ArenaVector<uint32_t> forward_predecessors(blocks_.size(),
+ arena_->Adapter(kArenaAllocSsaLiveness));
+ for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ size_t number_of_forward_predecessors = block->GetPredecessors().size();
+ if (block->IsLoopHeader()) {
+ number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
+ }
+ forward_predecessors[block->GetBlockId()] = number_of_forward_predecessors;
+ }
+
+ // (2): Following a worklist approach, start with the entry block and iterate over
+ //      its successors. When all non-back-edge predecessors of a successor block
+ //      have been visited, the successor is added to the worklist, following an
+ //      order that satisfies the requirements of our linear graph.
+ linear_order_.reserve(GetReversePostOrder().size());
+ ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocSsaLiveness));
+ worklist.push_back(GetEntryBlock());
+ do {
+ HBasicBlock* current = worklist.back();
+ worklist.pop_back();
+ linear_order_.push_back(current);
+ for (HBasicBlock* successor : current->GetSuccessors()) {
+ int block_id = successor->GetBlockId();
+ size_t number_of_remaining_predecessors = forward_predecessors[block_id];
+ if (number_of_remaining_predecessors == 1) {
+ AddToListForLinearization(&worklist, successor);
+ }
+ forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
+ }
+ } while (!worklist.empty());
+
+ DCHECK(HasIrreducibleLoops() || IsLinearOrderWellFormed(*this));
+}
+
void HLoopInformation::Dump(std::ostream& os) {
os << "header: " << header_->GetBlockId() << std::endl;
os << "pre header: " << GetPreHeader()->GetBlockId() << std::endl;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 99d7673467..6d207765e3 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -24,7 +24,9 @@
#include "base/arena_bit_vector.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "base/stl_util.h"
+#include "base/transform_array_ref.h"
#include "dex_file.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
@@ -35,9 +37,7 @@
#include "mirror/class.h"
#include "offsets.h"
#include "primitive.h"
-#include "utils/array_ref.h"
#include "utils/intrusive_forward_list.h"
-#include "utils/transform_array_ref.h"
namespace art {
@@ -365,6 +365,13 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// is a throw-catch loop, i.e. the header is a catch block.
GraphAnalysisResult AnalyzeLoops() const;
+ // Computes the linear order (should be called before using HLinearOrderIterator).
+ // Linearizes the graph such that:
+ // (1): a block is always after its dominator,
+ // (2): blocks of loops are contiguous.
+ // This creates a natural and efficient ordering when visualizing live ranges.
+ void Linearize();
+
// Iterate over blocks to compute try block membership. Needs reverse post
// order and loop information.
void ComputeTryBlockInformation();
@@ -830,7 +837,7 @@ static constexpr uint32_t kInvalidBlockId = static_cast<uint32_t>(-1);
class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
public:
- HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
+ explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
predecessors_(graph->GetArena()->Adapter(kArenaAllocPredecessors)),
successors_(graph->GetArena()->Adapter(kArenaAllocSuccessors)),
@@ -1314,7 +1321,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
#else
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M) \
M(MipsComputeBaseMethodAddress, Instruction) \
- M(MipsDexCacheArraysBase, Instruction)
+ M(MipsDexCacheArraysBase, Instruction) \
+ M(MipsPackedSwitch, Instruction)
#endif
#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
@@ -4366,7 +4374,7 @@ class HDiv FINAL : public HBinaryOperation {
HInstruction* left,
HInstruction* right,
uint32_t dex_pc)
- : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {}
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
template <typename T>
T ComputeIntegral(T x, T y) const {
@@ -4401,11 +4409,6 @@ class HDiv FINAL : public HBinaryOperation {
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- static SideEffects SideEffectsForArchRuntimeCalls() {
- // The generated code can use a runtime call.
- return SideEffects::CanTriggerGC();
- }
-
DECLARE_INSTRUCTION(Div);
private:
@@ -4418,7 +4421,7 @@ class HRem FINAL : public HBinaryOperation {
HInstruction* left,
HInstruction* right,
uint32_t dex_pc)
- : HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {}
+ : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc) {}
template <typename T>
T ComputeIntegral(T x, T y) const {
@@ -4453,10 +4456,6 @@ class HRem FINAL : public HBinaryOperation {
ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
- static SideEffects SideEffectsForArchRuntimeCalls() {
- return SideEffects::CanTriggerGC();
- }
-
DECLARE_INSTRUCTION(Rem);
private:
@@ -4909,9 +4908,7 @@ class HTypeConversion FINAL : public HExpression<1> {
public:
// Instantiate a type conversion of `input` to `result_type`.
HTypeConversion(Primitive::Type result_type, HInstruction* input, uint32_t dex_pc)
- : HExpression(result_type,
- SideEffectsForArchRuntimeCalls(input->GetType(), result_type),
- dex_pc) {
+ : HExpression(result_type, SideEffects::None(), dex_pc) {
SetRawInputAt(0, input);
// Invariant: We should never generate a conversion to a Boolean value.
DCHECK_NE(Primitive::kPrimBoolean, result_type);
@@ -4930,18 +4927,6 @@ class HTypeConversion FINAL : public HExpression<1> {
// containing the result. If the input cannot be converted, return nullptr.
HConstant* TryStaticEvaluation() const;
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type input_type,
- Primitive::Type result_type) {
- // Some architectures may not require the 'GC' side effects, but at this point
- // in the compilation process we do not know what architecture we will
- // generate code for, so we must be conservative.
- if ((Primitive::IsFloatingPointType(input_type) && Primitive::IsIntegralType(result_type))
- || (input_type == Primitive::kPrimLong && Primitive::IsFloatingPointType(result_type))) {
- return SideEffects::CanTriggerGC();
- }
- return SideEffects::None();
- }
-
DECLARE_INSTRUCTION(TypeConversion);
private:
@@ -5023,9 +5008,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HExpression(field_type,
- SideEffectsForArchRuntimeCalls(field_type, is_volatile),
- dex_pc),
+ : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset,
field_type,
is_volatile,
@@ -5056,16 +5039,6 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
- SideEffects side_effects = SideEffects::FieldReadOfType(field_type, is_volatile);
-
- // MIPS delegates volatile kPrimLong and kPrimDouble loads to a runtime helper.
- if (Primitive::Is64BitType(field_type)) {
- side_effects.Add(SideEffects::CanTriggerGC());
- }
- return side_effects;
- }
-
DECLARE_INSTRUCTION(InstanceFieldGet);
private:
@@ -5086,8 +5059,7 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffectsForArchRuntimeCalls(field_type, is_volatile),
- dex_pc),
+ : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset,
field_type,
is_volatile,
@@ -5112,16 +5084,6 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
- SideEffects side_effects = SideEffects::FieldWriteOfType(field_type, is_volatile);
-
- // MIPS delegates volatile kPrimLong and kPrimDouble stores to a runtime helper.
- if (Primitive::Is64BitType(field_type)) {
- side_effects.Add(SideEffects::CanTriggerGC());
- }
- return side_effects;
- }
-
DECLARE_INSTRUCTION(InstanceFieldSet);
private:
@@ -5926,9 +5888,7 @@ class HStaticFieldGet FINAL : public HExpression<1> {
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HExpression(field_type,
- SideEffectsForArchRuntimeCalls(field_type, is_volatile),
- dex_pc),
+ : HExpression(field_type, SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset,
field_type,
is_volatile,
@@ -5956,16 +5916,6 @@ class HStaticFieldGet FINAL : public HExpression<1> {
Primitive::Type GetFieldType() const { return field_info_.GetFieldType(); }
bool IsVolatile() const { return field_info_.IsVolatile(); }
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
- SideEffects side_effects = SideEffects::FieldReadOfType(field_type, is_volatile);
-
- // MIPS delegates volatile kPrimLong and kPrimDouble loads to a runtime helper.
- if (Primitive::Is64BitType(field_type)) {
- side_effects.Add(SideEffects::CanTriggerGC());
- }
- return side_effects;
- }
-
DECLARE_INSTRUCTION(StaticFieldGet);
private:
@@ -5986,8 +5936,7 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
uint32_t dex_pc)
- : HTemplateInstruction(SideEffectsForArchRuntimeCalls(field_type, is_volatile),
- dex_pc),
+ : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset,
field_type,
is_volatile,
@@ -6009,16 +5958,6 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
bool GetValueCanBeNull() const { return GetPackedFlag<kFlagValueCanBeNull>(); }
void ClearValueCanBeNull() { SetPackedFlag<kFlagValueCanBeNull>(false); }
- static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type field_type, bool is_volatile) {
- SideEffects side_effects = SideEffects::FieldWriteOfType(field_type, is_volatile);
-
- // MIPS delegates volatile kPrimLong and kPrimDouble stores to a runtime helper.
- if (Primitive::Is64BitType(field_type)) {
- side_effects.Add(SideEffects::CanTriggerGC());
- }
- return side_effects;
- }
-
DECLARE_INSTRUCTION(StaticFieldSet);
private:
@@ -6274,7 +6213,7 @@ class HInstanceOf FINAL : public HExpression<2> {
class HBoundType FINAL : public HExpression<1> {
public:
- HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
+ explicit HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc)
: HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc),
upper_bound_(ReferenceTypeInfo::CreateInvalid()) {
SetPackedFlag<kFlagUpperCanBeNull>(true);
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index de77245e17..36431c1fb9 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -66,6 +66,41 @@ class HMipsDexCacheArraysBase : public HExpression<0> {
DISALLOW_COPY_AND_ASSIGN(HMipsDexCacheArraysBase);
};
+// MIPS version of HPackedSwitch that holds a pointer to the base method address.
+class HMipsPackedSwitch FINAL : public HTemplateInstruction<2> {
+ public:
+ HMipsPackedSwitch(int32_t start_value,
+ int32_t num_entries,
+ HInstruction* input,
+ HMipsComputeBaseMethodAddress* method_base,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc),
+ start_value_(start_value),
+ num_entries_(num_entries) {
+ SetRawInputAt(0, input);
+ SetRawInputAt(1, method_base);
+ }
+
+ bool IsControlFlow() const OVERRIDE { return true; }
+
+ int32_t GetStartValue() const { return start_value_; }
+
+ int32_t GetNumEntries() const { return num_entries_; }
+
+ HBasicBlock* GetDefaultBlock() const {
+ // Last entry is the default block.
+ return GetBlock()->GetSuccessors()[num_entries_];
+ }
+
+ DECLARE_INSTRUCTION(MipsPackedSwitch);
+
+ private:
+ const int32_t start_value_;
+ const int32_t num_entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HMipsPackedSwitch);
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_MIPS_H_
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 8bd8667f84..814202e97b 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -17,6 +17,11 @@
#ifndef ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
#define ART_COMPILER_OPTIMIZING_NODES_SHARED_H_
+// This `#include` should never be needed for compilation, as this file (`nodes_shared.h`) is
+// included in `nodes.h`. However, it helps editing tools (e.g. YouCompleteMe) by giving them
+// better context (defining `HInstruction` and co).
+#include "nodes.h"
+
namespace art {
class HMultiplyAccumulate FINAL : public HExpression<3> {
@@ -117,10 +122,15 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
// This instruction computes an intermediate address pointing in the 'middle' of an object. The
// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
// never used across anything that can trigger GC.
+// The result of this instruction is not a pointer in the sense of `Primitive::kPrimNot`, so we
+// represent it with the type `Primitive::kPrimInt`.
class HIntermediateAddress FINAL : public HExpression<2> {
public:
HIntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
- : HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) {
+ : HExpression(Primitive::kPrimInt, SideEffects::DependsOnGC(), dex_pc) {
+ DCHECK_EQ(Primitive::ComponentSize(Primitive::kPrimInt),
+ Primitive::ComponentSize(Primitive::kPrimNot))
+ << "kPrimInt and kPrimNot have different sizes.";
SetRawInputAt(0, base_address);
SetRawInputAt(1, offset);
}
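For intuition, the 'middle of an object' is the array base plus its data offset; a toy
model of the rewrite (the 12-byte offset and 4-byte scaling are illustrative, not the
authoritative ART object layout):

    #include <stdint.h>

    // Hoist 'array + data_offset' once (the HIntermediateAddress value), so each
    // element access only adds the scaled index.
    uintptr_t IntElementAddress(uintptr_t array, int32_t i) {
      const uintptr_t kDataOffset = 12;                    // illustrative header size
      uintptr_t address = array + kDataOffset;             // intermediate address
      return address + (static_cast<uintptr_t>(i) << 2);   // 4-byte int elements
    }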
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index c6d297df4f..6006e6cf5d 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -92,6 +92,25 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ if (switch_insn->GetNumEntries() <=
+ InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
+ return;
+ }
+ // We need to replace the HPackedSwitch with an HMipsPackedSwitch in order to
+ // address the constant area.
+ InitializePCRelativeBasePointer();
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = switch_insn->GetBlock();
+ HMipsPackedSwitch* mips_switch = new (graph->GetArena()) HMipsPackedSwitch(
+ switch_insn->GetStartValue(),
+ switch_insn->GetNumEntries(),
+ switch_insn->InputAt(0),
+ base_,
+ switch_insn->GetDexPc());
+ block->ReplaceAndRemoveInstructionWith(switch_insn, mips_switch);
+ }
+
void HandleInvoke(HInvoke* invoke) {
// If this is an invoke-static/-direct with PC-relative dex cache array
// addressing, we need the PC-relative address base.
diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h
index a70ceae076..d48b1a0bb9 100644
--- a/compiler/optimizing/register_allocation_resolver.h
+++ b/compiler/optimizing/register_allocation_resolver.h
@@ -18,9 +18,9 @@
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/value_object.h"
#include "primitive.h"
-#include "utils/array_ref.h"
namespace art {
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index b8e1379ef9..e64c005410 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -157,20 +157,11 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
}
void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
- if (load_class->NeedsAccessCheck()) {
- // We need to call the runtime anyway, so we simply get the class as that call's return value.
- return;
- }
- if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
- // Loading from the ArtMethod* is the most efficient retrieval.
- // TODO: This may not actually be true for all architectures and
- // locations of target classes. The additional register pressure
- // for using the ArtMethod* should be considered.
- return;
- }
-
- DCHECK_EQ(load_class->GetLoadKind(), HLoadClass::LoadKind::kDexCacheViaMethod);
+ DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kDexCacheViaMethod ||
+ load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass)
+ << load_class->GetLoadKind();
DCHECK(!load_class->IsInDexCache()) << "HLoadClass should not be optimized before sharpening.";
+ DCHECK(!load_class->IsInBootImage()) << "HLoadClass should not be optimized before sharpening.";
const DexFile& dex_file = load_class->GetDexFile();
uint32_t type_index = load_class->GetTypeIndex();
@@ -242,13 +233,28 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
}
}
}
- if (is_in_dex_cache) {
- load_class->MarkInDexCache();
- }
+
if (is_in_boot_image) {
load_class->MarkInBootImage();
}
+ if (load_class->NeedsAccessCheck()) {
+ // We need to call the runtime anyway, so we simply get the class as that call's return value.
+ return;
+ }
+
+ if (load_class->GetLoadKind() == HLoadClass::LoadKind::kReferrersClass) {
+ // Loading from the ArtMethod* is the most efficient retrieval in terms of code size.
+ // TODO: This may not actually be true for all architectures and
+ // locations of target classes. The additional register pressure
+ // for using the ArtMethod* should be considered.
+ return;
+ }
+
+ if (is_in_dex_cache) {
+ load_class->MarkInDexCache();
+ }
+
HLoadClass::LoadKind load_kind = codegen_->GetSupportedLoadClassKind(desired_load_kind);
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimeAddress:
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index a4d52d7761..9ce34aa80b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -23,119 +23,11 @@
namespace art {
void SsaLivenessAnalysis::Analyze() {
- LinearizeGraph();
+ graph_->Linearize();
NumberInstructions();
ComputeLiveness();
}
-static bool IsLoop(HLoopInformation* info) {
- return info != nullptr;
-}
-
-static bool InSameLoop(HLoopInformation* first_loop, HLoopInformation* second_loop) {
- return first_loop == second_loop;
-}
-
-static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) {
- return (inner != outer)
- && (inner != nullptr)
- && (outer != nullptr)
- && inner->IsIn(*outer);
-}
-
-static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
- HLoopInformation* block_loop = block->GetLoopInformation();
- auto insert_pos = worklist->rbegin(); // insert_pos.base() will be the actual position.
- for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
- HBasicBlock* current = *insert_pos;
- HLoopInformation* current_loop = current->GetLoopInformation();
- if (InSameLoop(block_loop, current_loop)
- || !IsLoop(current_loop)
- || IsInnerLoop(current_loop, block_loop)) {
- // The block can be processed immediately.
- break;
- }
- }
- worklist->insert(insert_pos.base(), block);
-}
-
-static bool IsLinearOrderWellFormed(const HGraph& graph) {
- for (HBasicBlock* header : graph.GetBlocks()) {
- if (header == nullptr || !header->IsLoopHeader()) {
- continue;
- }
-
- HLoopInformation* loop = header->GetLoopInformation();
- size_t num_blocks = loop->GetBlocks().NumSetBits();
- size_t found_blocks = 0u;
-
- for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
- HBasicBlock* current = it.Current();
- if (loop->Contains(*current)) {
- found_blocks++;
- if (found_blocks == 1u && current != header) {
- // First block is not the header.
- return false;
- } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
- // Last block is not a back edge.
- return false;
- }
- } else if (found_blocks != 0u && found_blocks != num_blocks) {
- // Blocks are not adjacent.
- return false;
- }
- }
- DCHECK_EQ(found_blocks, num_blocks);
- }
-
- return true;
-}
-
-void SsaLivenessAnalysis::LinearizeGraph() {
- // Create a reverse post ordering with the following properties:
- // - Blocks in a loop are consecutive,
- // - Back-edge is the last block before loop exits.
-
- // (1): Record the number of forward predecessors for each block. This is to
- // ensure the resulting order is reverse post order. We could use the
- // current reverse post order in the graph, but it would require making
- // order queries to a GrowableArray, which is not the best data structure
- // for it.
- ArenaVector<uint32_t> forward_predecessors(graph_->GetBlocks().size(),
- graph_->GetArena()->Adapter(kArenaAllocSsaLiveness));
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
- size_t number_of_forward_predecessors = block->GetPredecessors().size();
- if (block->IsLoopHeader()) {
- number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
- }
- forward_predecessors[block->GetBlockId()] = number_of_forward_predecessors;
- }
-
- // (2): Following a worklist approach, first start with the entry block, and
- // iterate over the successors. When all non-back edge predecessors of a
- // successor block are visited, the successor block is added in the worklist
- // following an order that satisfies the requirements to build our linear graph.
- graph_->linear_order_.reserve(graph_->GetReversePostOrder().size());
- ArenaVector<HBasicBlock*> worklist(graph_->GetArena()->Adapter(kArenaAllocSsaLiveness));
- worklist.push_back(graph_->GetEntryBlock());
- do {
- HBasicBlock* current = worklist.back();
- worklist.pop_back();
- graph_->linear_order_.push_back(current);
- for (HBasicBlock* successor : current->GetSuccessors()) {
- int block_id = successor->GetBlockId();
- size_t number_of_remaining_predecessors = forward_predecessors[block_id];
- if (number_of_remaining_predecessors == 1) {
- AddToListForLinearization(&worklist, successor);
- }
- forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
- }
- } while (!worklist.empty());
-
- DCHECK(graph_->HasIrreducibleLoops() || IsLinearOrderWellFormed(*graph_));
-}
-
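
The removed helper now lives in HGraph::Linearize(), as the call above shows. For readers following the algorithm, here is a minimal standalone rendition of the worklist scheme over plain adjacency lists (not ART code; the loop-aware insertion of AddToListForLinearization() is reduced to a plain push, and block 0 is assumed to be the entry):

    #include <cstdio>
    #include <vector>

    // `succ` is an adjacency list; `forward_preds[b]` counts non-back-edge
    // predecessors of block b, matching step (1) of the original comment.
    std::vector<int> Linearize(const std::vector<std::vector<int>>& succ,
                               std::vector<int> forward_preds) {
      std::vector<int> order;
      std::vector<int> worklist = {0};
      while (!worklist.empty()) {
        int current = worklist.back();
        worklist.pop_back();
        order.push_back(current);
        for (int s : succ[current]) {
          // Schedule a successor only once its last forward predecessor is placed,
          // which keeps the result a reverse post order.
          if (forward_preds[s] == 1) {
            worklist.push_back(s);
          }
          --forward_preds[s];
        }
      }
      return order;
    }

    int main() {
      // Diamond: 0 -> {1, 2}, 1 -> 3, 2 -> 3. Prints "0 2 1 3".
      std::vector<std::vector<int>> succ = {{1, 2}, {3}, {3}, {}};
      for (int b : Linearize(succ, {0, 1, 1, 2})) printf("%d ", b);
      printf("\n");
      return 0;
    }
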
void SsaLivenessAnalysis::NumberInstructions() {
int ssa_index = 0;
size_t lifetime_position = 0;
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 0be16118b1..b62bf4e5f9 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1186,12 +1186,6 @@ class SsaLivenessAnalysis : public ValueObject {
static constexpr const char* kLivenessPassName = "liveness";
private:
- // Linearize the graph so that:
- // (1): a block is always after its dominator,
- // (2): blocks of loops are contiguous.
- // This creates a natural and efficient ordering when visualizing live ranges.
- void LinearizeGraph();
-
// Give an SSA number to each instruction that defines a value used by another instruction,
// and setup the lifetime information of each instruction and block.
void NumberInstructions();
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 3084e6e2b6..ee5811c3c0 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -246,7 +246,7 @@ class Address : public ValueObject {
NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
};
- Address(Register rn, int32_t offset = 0, Mode am = Offset) : rn_(rn), rm_(R0),
+ explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) : rn_(rn), rm_(R0),
offset_(offset),
am_(am), is_immed_offset_(true), shift_(LSL) {
}
@@ -763,6 +763,9 @@ class ArmAssembler : public Assembler {
virtual void PushList(RegList regs, Condition cond = AL) = 0;
virtual void PopList(RegList regs, Condition cond = AL) = 0;
+ virtual void StoreList(RegList regs, size_t stack_offset) = 0;
+ virtual void LoadList(RegList regs, size_t stack_offset) = 0;
+
virtual void Mov(Register rd, Register rm, Condition cond = AL) = 0;
// Convenience shift instructions. Use mov instruction with shifter operand
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index ebdfc98554..2269ba2d20 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2018,6 +2018,45 @@ inline size_t Thumb2Assembler::Fixup::IncreaseSize(Size new_size) {
return adjustment;
}
+bool Thumb2Assembler::Fixup::IsCandidateForEmitEarly() const {
+ DCHECK(size_ == original_size_);
+ if (target_ == kUnresolved) {
+ return false;
+ }
+ // GetOffset() does not depend on current_code_size for branches, only for literals.
+ constexpr uint32_t current_code_size = 0u;
+ switch (GetSize()) {
+ case kBranch16Bit:
+ return IsInt(cond_ != AL ? 9 : 12, GetOffset(current_code_size));
+ case kBranch32Bit:
+ // We don't support conditional branches beyond +-1MiB
+ // or unconditional branches beyond +-16MiB.
+ return true;
+
+ case kCbxz16Bit:
+ return IsUint<7>(GetOffset(current_code_size));
+ case kCbxz32Bit:
+ return IsInt<9>(GetOffset(current_code_size));
+ case kCbxz48Bit:
+ // We don't support conditional branches beyond +-1MiB.
+ return true;
+
+ case kLiteral1KiB:
+ case kLiteral4KiB:
+ case kLiteral64KiB:
+ case kLiteral1MiB:
+ case kLiteralFar:
+ case kLiteralAddr1KiB:
+ case kLiteralAddr4KiB:
+ case kLiteralAddr64KiB:
+ case kLiteralAddrFar:
+ case kLongOrFPLiteral1KiB:
+ case kLongOrFPLiteral64KiB:
+ case kLongOrFPLiteralFar:
+ return false;
+ }
+}
+
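
The bit widths tested above follow from the Thumb2 encodings: the 16-bit conditional branch (T1) carries a 9-bit signed offset (imm8 << 1), the 16-bit unconditional form (T2) a 12-bit signed offset (imm11 << 1), and CBZ/CBNZ a 7-bit unsigned offset (i:imm5 << 1); the 32-bit CBZ expansion falls back to CMP plus a conditional branch, hence the 9-bit signed check. A standalone sketch of the range tests (helper names are mine, mirroring art::IsInt/IsUint):

    #include <cassert>
    #include <cstdint>

    constexpr bool IsIntN(int bits, int32_t value) {
      int32_t limit = int32_t{1} << (bits - 1);
      return -limit <= value && value < limit;
    }
    constexpr bool IsUintN(int bits, int32_t value) {
      return value >= 0 && value < (int32_t{1} << bits);
    }

    int main() {
      assert(IsIntN(9, 254) && !IsIntN(9, 256));      // 16-bit conditional branch.
      assert(IsIntN(12, 2046) && !IsIntN(12, 2048));  // 16-bit unconditional branch.
      assert(IsUintN(7, 126) && !IsUintN(7, 128));    // CBZ/CBNZ.
      return 0;
    }
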
uint32_t Thumb2Assembler::Fixup::AdjustSizeIfNeeded(uint32_t current_code_size) {
uint32_t old_code_size = current_code_size;
switch (GetSize()) {
@@ -3333,6 +3372,30 @@ void Thumb2Assembler::PopList(RegList regs, Condition cond) {
ldm(IA_W, SP, regs, cond);
}
+void Thumb2Assembler::StoreList(RegList regs, size_t stack_offset) {
+ DCHECK_NE(regs, 0u);
+ DCHECK_EQ(regs & (1u << IP), 0u);
+ if (IsPowerOfTwo(regs)) {
+ Register reg = static_cast<Register>(CTZ(static_cast<uint32_t>(regs)));
+ str(reg, Address(SP, stack_offset));
+ } else {
+ add(IP, SP, ShifterOperand(stack_offset));
+ stm(IA, IP, regs);
+ }
+}
+
+void Thumb2Assembler::LoadList(RegList regs, size_t stack_offset) {
+ DCHECK_NE(regs, 0u);
+ DCHECK_EQ(regs & (1u << IP), 0u);
+ if (IsPowerOfTwo(regs)) {
+ Register reg = static_cast<Register>(CTZ(static_cast<uint32_t>(regs)));
+ ldr(reg, Address(SP, stack_offset));
+ } else {
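+ // LDM without writeback may include its base register in the list: the base
+ // address is read once up front, so the lowest register can double as the base here.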
+ Register lowest_reg = static_cast<Register>(CTZ(static_cast<uint32_t>(regs)));
+ add(lowest_reg, SP, ShifterOperand(stack_offset));
+ ldm(IA, lowest_reg, regs);
+ }
+}
void Thumb2Assembler::Mov(Register rd, Register rm, Condition cond) {
if (cond != AL || rd != rm) {
@@ -3343,6 +3406,30 @@ void Thumb2Assembler::Mov(Register rd, Register rm, Condition cond) {
void Thumb2Assembler::Bind(Label* label) {
BindLabel(label, buffer_.Size());
+
+ // Try to emit some Fixups now to reduce the memory needed during the branch fixup later.
+ while (!fixups_.empty() && fixups_.back().IsCandidateForEmitEarly()) {
+ const Fixup& last_fixup = fixups_.back();
+ // Fixups are ordered by location, so the candidate can surely be emitted if it is
+ // a forward branch. If it's a backward branch, it may go over any number of other
+ // fixups. We could check for any number of emit early candidates but we want this
+ // heuristic to be quick, so check just one.
+ uint32_t target = last_fixup.GetTarget();
+ if (target < last_fixup.GetLocation() &&
+ fixups_.size() >= 2u &&
+ fixups_[fixups_.size() - 2u].GetLocation() >= target) {
+ const Fixup& prev_fixup = fixups_[fixups_.size() - 2u];
+ if (!prev_fixup.IsCandidateForEmitEarly()) {
+ break;
+ }
+ uint32_t min_target = std::min(target, prev_fixup.GetTarget());
+ if (fixups_.size() >= 3u && fixups_[fixups_.size() - 3u].GetLocation() >= min_target) {
+ break;
+ }
+ }
+ last_fixup.Emit(&buffer_, buffer_.Size());
+ fixups_.pop_back();
+ }
}
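
A simplified model of the check above (plain C++, not ART code; the `Fixup` struct and `emittable` flag are stand-ins, the latter for IsCandidateForEmitEarly() on the other fixups): forward branches are always safe to flush, while a backward branch is checked against at most one preceding fixup to keep the test cheap.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Fixup {
      uint32_t location;
      uint32_t target;
      bool emittable;
    };

    // Returns true if fixups.back() may be emitted now; fixups are ordered by location.
    bool CanEmitLastEarly(const std::vector<Fixup>& fixups) {
      const Fixup& last = fixups.back();
      if (!last.emittable) return false;
      size_t n = fixups.size();
      if (last.target < last.location && n >= 2u && fixups[n - 2u].location >= last.target) {
        const Fixup& prev = fixups[n - 2u];
        if (!prev.emittable) return false;
        uint32_t min_target = std::min(last.target, prev.target);
        if (n >= 3u && fixups[n - 3u].location >= min_target) return false;
      }
      return true;
    }

    int main() {
      // Forward branch: always a candidate.
      assert(CanEmitLastEarly({{100, 200, true}}));
      // Backward branch over one emittable fixup: still fine.
      assert(CanEmitLastEarly({{50, 60, true}, {100, 40, true}}));
      // Backward branch over a fixup that is not ready: must wait.
      assert(!CanEmitLastEarly({{50, 60, false}, {100, 40, true}}));
      return 0;
    }
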
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 13f3becb6d..1c495aa7a7 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -22,11 +22,11 @@
#include <vector>
#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/logging.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/arm/assembler_arm.h"
-#include "utils/array_ref.h"
#include "offsets.h"
namespace art {
@@ -293,6 +293,8 @@ class Thumb2Assembler FINAL : public ArmAssembler {
void PushList(RegList regs, Condition cond = AL) OVERRIDE;
void PopList(RegList regs, Condition cond = AL) OVERRIDE;
+ void StoreList(RegList regs, size_t stack_offset) OVERRIDE;
+ void LoadList(RegList regs, size_t stack_offset) OVERRIDE;
void Mov(Register rd, Register rm, Condition cond = AL) OVERRIDE;
@@ -573,6 +575,10 @@ class Thumb2Assembler FINAL : public ArmAssembler {
return location_;
}
+ uint32_t GetTarget() const {
+ return target_;
+ }
+
uint32_t GetAdjustment() const {
return adjustment_;
}
@@ -592,6 +598,11 @@ class Thumb2Assembler FINAL : public ArmAssembler {
target_ = target;
}
+ // Branches with bound targets that are in range can be emitted early.
+ // However, the caller still needs to check that the branch doesn't go over
+ // another Fixup that's not ready to be emitted.
+ bool IsCandidateForEmitEarly() const;
+
// Check if the current size is OK for current location_, target_ and adjustment_.
// If not, increase the size. Return the size increase, 0 if unchanged.
// If the target is after this Fixup, also add the difference to adjustment_,
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index a03dd74657..14d29c4f1a 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -314,11 +314,21 @@ void ArmVIXLJNIMacroAssembler::Move(ManagedRegister m_dst,
CHECK(src.IsCoreRegister()) << src;
___ Mov(dst.AsVIXLRegister(), src.AsVIXLRegister());
} else if (dst.IsDRegister()) {
- CHECK(src.IsDRegister()) << src;
- ___ Vmov(F64, dst.AsVIXLDRegister(), src.AsVIXLDRegister());
+ if (src.IsDRegister()) {
+ ___ Vmov(F64, dst.AsVIXLDRegister(), src.AsVIXLDRegister());
+ } else {
+ // VMOV Dn, Rlo, Rhi (Dn = {Rlo, Rhi})
+ CHECK(src.IsRegisterPair()) << src;
+ ___ Vmov(dst.AsVIXLDRegister(), src.AsVIXLRegisterPairLow(), src.AsVIXLRegisterPairHigh());
+ }
} else if (dst.IsSRegister()) {
- CHECK(src.IsSRegister()) << src;
- ___ Vmov(F32, dst.AsVIXLSRegister(), src.AsVIXLSRegister());
+ if (src.IsSRegister()) {
+ ___ Vmov(F32, dst.AsVIXLSRegister(), src.AsVIXLSRegister());
+ } else {
+ // VMOV Sn, Rn (Sn = Rn)
+ CHECK(src.IsCoreRegister()) << src;
+ ___ Vmov(dst.AsVIXLSRegister(), src.AsVIXLRegister());
+ }
} else {
CHECK(dst.IsRegisterPair()) << dst;
CHECK(src.IsRegisterPair()) << src;
diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h
deleted file mode 100644
index 8dc9ab4a5e..0000000000
--- a/compiler/utils/array_ref.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARRAY_REF_H_
-#define ART_COMPILER_UTILS_ARRAY_REF_H_
-
-#include <type_traits>
-#include <vector>
-
-#include "base/logging.h"
-
-namespace art {
-
-/**
- * @brief A container that references an array.
- *
- * @details The template class ArrayRef provides a container that references
- * an external array. This external array must remain alive while the ArrayRef
- * object is in use. The external array may be a std::vector<>-backed storage
- * or any other contiguous chunk of memory but that memory must remain valid,
- * i.e. the std::vector<> must not be resized for example.
- *
- * Except for copy/assign and insert/erase/capacity functions, the interface
- * is essentially the same as std::vector<>. Since we don't want to throw
- * exceptions, at() is also excluded.
- */
-template <typename T>
-class ArrayRef {
- public:
- typedef T value_type;
- typedef T& reference;
- typedef const T& const_reference;
- typedef T* pointer;
- typedef const T* const_pointer;
- typedef T* iterator;
- typedef const T* const_iterator;
- typedef std::reverse_iterator<iterator> reverse_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
- typedef ptrdiff_t difference_type;
- typedef size_t size_type;
-
- // Constructors.
-
- constexpr ArrayRef()
- : array_(nullptr), size_(0u) {
- }
-
- template <size_t size>
- explicit constexpr ArrayRef(T (&array)[size])
- : array_(array), size_(size) {
- }
-
- template <typename U,
- size_t size,
- typename = typename std::enable_if<std::is_same<T, const U>::value>::type>
- explicit constexpr ArrayRef(U (&array)[size])
- : array_(array), size_(size) {
- }
-
- constexpr ArrayRef(T* array_in, size_t size_in)
- : array_(array_in), size_(size_in) {
- }
-
- template <typename Vector,
- typename = typename std::enable_if<
- std::is_same<typename Vector::value_type, value_type>::value>::type>
- explicit ArrayRef(Vector& v)
- : array_(v.data()), size_(v.size()) {
- }
-
- template <typename Vector,
- typename = typename std::enable_if<
- std::is_same<
- typename std::add_const<typename Vector::value_type>::type,
- value_type>::value>::type>
- explicit ArrayRef(const Vector& v)
- : array_(v.data()), size_(v.size()) {
- }
-
- ArrayRef(const ArrayRef&) = default;
-
- // Assignment operators.
-
- ArrayRef& operator=(const ArrayRef& other) {
- array_ = other.array_;
- size_ = other.size_;
- return *this;
- }
-
- template <typename U>
- typename std::enable_if<std::is_same<T, const U>::value, ArrayRef>::type&
- operator=(const ArrayRef<U>& other) {
- return *this = ArrayRef(other);
- }
-
- // Destructor.
- ~ArrayRef() = default;
-
- // Iterators.
- iterator begin() { return array_; }
- const_iterator begin() const { return array_; }
- const_iterator cbegin() const { return array_; }
- iterator end() { return array_ + size_; }
- const_iterator end() const { return array_ + size_; }
- const_iterator cend() const { return array_ + size_; }
- reverse_iterator rbegin() { return reverse_iterator(end()); }
- const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
- const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); }
- reverse_iterator rend() { return reverse_iterator(begin()); }
- const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
- const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); }
-
- // Size.
- size_type size() const { return size_; }
- bool empty() const { return size() == 0u; }
-
- // Element access. NOTE: Not providing at().
-
- reference operator[](size_type n) {
- DCHECK_LT(n, size_);
- return array_[n];
- }
-
- const_reference operator[](size_type n) const {
- DCHECK_LT(n, size_);
- return array_[n];
- }
-
- reference front() {
- DCHECK_NE(size_, 0u);
- return array_[0];
- }
-
- const_reference front() const {
- DCHECK_NE(size_, 0u);
- return array_[0];
- }
-
- reference back() {
- DCHECK_NE(size_, 0u);
- return array_[size_ - 1u];
- }
-
- const_reference back() const {
- DCHECK_NE(size_, 0u);
- return array_[size_ - 1u];
- }
-
- value_type* data() { return array_; }
- const value_type* data() const { return array_; }
-
- ArrayRef SubArray(size_type pos) {
- return SubArray(pos, size() - pos);
- }
- ArrayRef<const T> SubArray(size_type pos) const {
- return SubArray(pos, size() - pos);
- }
- ArrayRef SubArray(size_type pos, size_type length) {
- DCHECK_LE(pos, size());
- DCHECK_LE(length, size() - pos);
- return ArrayRef(data() + pos, length);
- }
- ArrayRef<const T> SubArray(size_type pos, size_type length) const {
- DCHECK_LE(pos, size());
- DCHECK_LE(length, size() - pos);
- return ArrayRef<const T>(data() + pos, length);
- }
-
- private:
- T* array_;
- size_t size_;
-};
-
-template <typename T>
-bool operator==(const ArrayRef<T>& lhs, const ArrayRef<T>& rhs) {
- return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin());
-}
-
-template <typename T>
-bool operator!=(const ArrayRef<T>& lhs, const ArrayRef<T>& rhs) {
- return !(lhs == rhs);
-}
-
-} // namespace art
-
-
-#endif // ART_COMPILER_UTILS_ARRAY_REF_H_
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index b616057e79..314ff8cf7a 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -24,6 +24,7 @@
#include "arm/constants_arm.h"
#include "base/arena_allocator.h"
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -33,7 +34,6 @@
#include "memory_region.h"
#include "mips/constants_mips.h"
#include "offsets.h"
-#include "utils/array_ref.h"
#include "x86/constants_x86.h"
#include "x86_64/constants_x86_64.h"
diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc
index 60a891d6a2..4c0979e0b7 100644
--- a/compiler/utils/dedupe_set_test.cc
+++ b/compiler/utils/dedupe_set_test.cc
@@ -20,10 +20,10 @@
#include <cstdio>
#include <vector>
+#include "base/array_ref.h"
#include "dedupe_set-inl.h"
#include "gtest/gtest.h"
#include "thread-inl.h"
-#include "utils/array_ref.h"
namespace art {
diff --git a/compiler/utils/intrusive_forward_list.h b/compiler/utils/intrusive_forward_list.h
index ec2c08722c..b5fc2f2456 100644
--- a/compiler/utils/intrusive_forward_list.h
+++ b/compiler/utils/intrusive_forward_list.h
@@ -59,7 +59,7 @@ class IntrusiveForwardListIterator : public std::iterator<std::forward_iterator_
// Conversion from iterator to const_iterator.
template <typename OtherT,
typename = typename std::enable_if<std::is_same<T, const OtherT>::value>::type>
- IntrusiveForwardListIterator(const IntrusiveForwardListIterator<OtherT, HookTraits>& src)
+ IntrusiveForwardListIterator(const IntrusiveForwardListIterator<OtherT, HookTraits>& src) // NOLINT, implicit
: hook_(src.hook_) { }
// Iteration.
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 6f45bd62db..0119ae9bfb 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -22,12 +22,12 @@
#include "arch/instruction_set.h"
#include "base/arena_allocator.h"
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "managed_register.h"
#include "offsets.h"
-#include "utils/array_ref.h"
namespace art {
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 4b580b620f..b972c70eb9 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -230,12 +230,14 @@ void MipsAssembler::FinalizeCode() {
DsFsmCommitLabel();
SetReorder(false);
EmitLiterals();
+ ReserveJumpTableSpace();
PromoteBranches();
}
void MipsAssembler::FinalizeInstructions(const MemoryRegion& region) {
size_t number_of_delayed_adjust_pcs = cfi().NumberOfDelayedAdvancePCs();
EmitBranches();
+ EmitJumpTables();
Assembler::FinalizeInstructions(region);
PatchCFI(number_of_delayed_adjust_pcs);
}
@@ -1724,47 +1726,68 @@ void MipsAssembler::Branch::InitShortOrLong(MipsAssembler::Branch::OffsetBits of
type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
}
-void MipsAssembler::Branch::InitializeType(bool is_call, bool is_literal, bool is_r6) {
- CHECK_EQ(is_call && is_literal, false);
+void MipsAssembler::Branch::InitializeType(Type initial_type, bool is_r6) {
OffsetBits offset_size = GetOffsetSizeNeeded(location_, target_);
if (is_r6) {
// R6
- if (is_literal) {
- CHECK(!IsResolved());
- type_ = kR6Literal;
- } else if (is_call) {
- InitShortOrLong(offset_size, kR6Call, kR6LongCall);
- } else {
- switch (condition_) {
- case kUncond:
- InitShortOrLong(offset_size, kR6UncondBranch, kR6LongUncondBranch);
- break;
- case kCondEQZ:
- case kCondNEZ:
- // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
- type_ = (offset_size <= kOffset23) ? kR6CondBranch : kR6LongCondBranch;
- break;
- default:
- InitShortOrLong(offset_size, kR6CondBranch, kR6LongCondBranch);
- break;
- }
+ switch (initial_type) {
+ case kLabel:
+ CHECK(!IsResolved());
+ type_ = kR6Label;
+ break;
+ case kLiteral:
+ CHECK(!IsResolved());
+ type_ = kR6Literal;
+ break;
+ case kCall:
+ InitShortOrLong(offset_size, kR6Call, kR6LongCall);
+ break;
+ case kCondBranch:
+ switch (condition_) {
+ case kUncond:
+ InitShortOrLong(offset_size, kR6UncondBranch, kR6LongUncondBranch);
+ break;
+ case kCondEQZ:
+ case kCondNEZ:
+ // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
+ type_ = (offset_size <= kOffset23) ? kR6CondBranch : kR6LongCondBranch;
+ break;
+ default:
+ InitShortOrLong(offset_size, kR6CondBranch, kR6LongCondBranch);
+ break;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected branch type " << initial_type;
+ UNREACHABLE();
}
} else {
// R2
- if (is_literal) {
- CHECK(!IsResolved());
- type_ = kLiteral;
- } else if (is_call) {
- InitShortOrLong(offset_size, kCall, kLongCall);
- } else {
- switch (condition_) {
- case kUncond:
- InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
- break;
- default:
- InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
- break;
- }
+ switch (initial_type) {
+ case kLabel:
+ CHECK(!IsResolved());
+ type_ = kLabel;
+ break;
+ case kLiteral:
+ CHECK(!IsResolved());
+ type_ = kLiteral;
+ break;
+ case kCall:
+ InitShortOrLong(offset_size, kCall, kLongCall);
+ break;
+ case kCondBranch:
+ switch (condition_) {
+ case kUncond:
+ InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
+ break;
+ default:
+ InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
+ break;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected branch type " << initial_type;
+ UNREACHABLE();
}
}
old_type_ = type_;
@@ -1804,7 +1827,7 @@ MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, uint32_t target, bo
rhs_reg_(0),
condition_(kUncond),
delayed_instruction_(kUnfilledDelaySlot) {
- InitializeType(is_call, /* is_literal */ false, is_r6);
+ InitializeType((is_call ? kCall : kCondBranch), is_r6);
}
MipsAssembler::Branch::Branch(bool is_r6,
@@ -1862,10 +1885,14 @@ MipsAssembler::Branch::Branch(bool is_r6,
// Branch condition is always true, make the branch unconditional.
condition_ = kUncond;
}
- InitializeType(/* is_call */ false, /* is_literal */ false, is_r6);
+ InitializeType(kCondBranch, is_r6);
}
-MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, Register dest_reg, Register base_reg)
+MipsAssembler::Branch::Branch(bool is_r6,
+ uint32_t location,
+ Register dest_reg,
+ Register base_reg,
+ Type label_or_literal_type)
: old_location_(location),
location_(location),
target_(kUnresolved),
@@ -1879,7 +1906,7 @@ MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, Register dest_reg,
} else {
CHECK_NE(base_reg, ZERO);
}
- InitializeType(/* is_call */ false, /* is_literal */ true, is_r6);
+ InitializeType(label_or_literal_type, is_r6);
}
MipsAssembler::BranchCondition MipsAssembler::Branch::OppositeCondition(
@@ -2007,12 +2034,16 @@ bool MipsAssembler::Branch::IsLong() const {
case kUncondBranch:
case kCondBranch:
case kCall:
+ // R2 near label.
+ case kLabel:
// R2 near literal.
case kLiteral:
// R6 short branches.
case kR6UncondBranch:
case kR6CondBranch:
case kR6Call:
+ // R6 near label.
+ case kR6Label:
// R6 near literal.
case kR6Literal:
return false;
@@ -2020,12 +2051,16 @@ bool MipsAssembler::Branch::IsLong() const {
case kLongUncondBranch:
case kLongCondBranch:
case kLongCall:
+ // R2 far label.
+ case kFarLabel:
// R2 far literal.
case kFarLiteral:
// R6 long branches.
case kR6LongUncondBranch:
case kR6LongCondBranch:
case kR6LongCall:
+ // R6 far label.
+ case kR6FarLabel:
// R6 far literal.
case kR6FarLiteral:
return true;
@@ -2096,6 +2131,10 @@ void MipsAssembler::Branch::PromoteToLong() {
case kCall:
type_ = kLongCall;
break;
+ // R2 near label.
+ case kLabel:
+ type_ = kFarLabel;
+ break;
// R2 near literal.
case kLiteral:
type_ = kFarLiteral;
@@ -2110,6 +2149,10 @@ void MipsAssembler::Branch::PromoteToLong() {
case kR6Call:
type_ = kR6LongCall;
break;
+ // R6 near label.
+ case kR6Label:
+ type_ = kR6FarLabel;
+ break;
// R6 near literal.
case kR6Literal:
type_ = kR6FarLiteral;
@@ -2123,6 +2166,8 @@ void MipsAssembler::Branch::PromoteToLong() {
uint32_t MipsAssembler::GetBranchLocationOrPcRelBase(const MipsAssembler::Branch* branch) const {
switch (branch->GetType()) {
+ case Branch::kLabel:
+ case Branch::kFarLabel:
case Branch::kLiteral:
case Branch::kFarLiteral:
return GetLabelLocation(&pc_rel_base_label_);
@@ -2132,7 +2177,7 @@ uint32_t MipsAssembler::GetBranchLocationOrPcRelBase(const MipsAssembler::Branch
}
uint32_t MipsAssembler::Branch::PromoteIfNeeded(uint32_t location, uint32_t max_short_distance) {
- // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 literals or
+ // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 labels/literals or
// `this->GetLocation()` for everything else.
// If the branch is still unresolved or already long, nothing to do.
if (IsLong() || !IsResolved()) {
@@ -2170,6 +2215,8 @@ uint32_t MipsAssembler::Branch::GetOffsetLocation() const {
uint32_t MipsAssembler::GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Branch* branch) const {
switch (branch->GetType()) {
+ case Branch::kLabel:
+ case Branch::kFarLabel:
case Branch::kLiteral:
case Branch::kFarLiteral:
return GetLabelLocation(&pc_rel_base_label_);
@@ -2180,7 +2227,7 @@ uint32_t MipsAssembler::GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Bra
}
uint32_t MipsAssembler::Branch::GetOffset(uint32_t location) const {
- // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 literals or
+ // `location` is either `GetLabelLocation(&pc_rel_base_label_)` for R2 labels/literals or
// `this->GetOffsetLocation() + branch_info_[this->GetType()].pc_org * sizeof(uint32_t)`
// for everything else.
CHECK(IsResolved());
@@ -2457,6 +2504,13 @@ void MipsAssembler::Call(MipsLabel* label) {
FinalizeLabeledBranch(label);
}
+void MipsAssembler::LoadLabelAddress(Register dest_reg, Register base_reg, MipsLabel* label) {
+ // Label address loads are treated as pseudo branches since they require very similar handling.
+ DCHECK(!label->IsBound());
+ branches_.emplace_back(IsR6(), buffer_.Size(), dest_reg, base_reg, Branch::kLabel);
+ FinalizeLabeledBranch(label);
+}
+
Literal* MipsAssembler::NewLiteral(size_t size, const uint8_t* data) {
DCHECK(size == 4u || size == 8u) << size;
literals_.emplace_back(size, data);
@@ -2468,13 +2522,17 @@ void MipsAssembler::LoadLiteral(Register dest_reg, Register base_reg, Literal* l
DCHECK_EQ(literal->GetSize(), 4u);
MipsLabel* label = literal->GetLabel();
DCHECK(!label->IsBound());
- branches_.emplace_back(IsR6(),
- buffer_.Size(),
- dest_reg,
- base_reg);
+ branches_.emplace_back(IsR6(), buffer_.Size(), dest_reg, base_reg, Branch::kLiteral);
FinalizeLabeledBranch(label);
}
+JumpTable* MipsAssembler::CreateJumpTable(std::vector<MipsLabel*>&& labels) {
+ jump_tables_.emplace_back(std::move(labels));
+ JumpTable* table = &jump_tables_.back();
+ DCHECK(!table->GetLabel()->IsBound());
+ return table;
+}
+
void MipsAssembler::EmitLiterals() {
if (!literals_.empty()) {
// We don't support byte and half-word literals.
@@ -2491,6 +2549,60 @@ void MipsAssembler::EmitLiterals() {
}
}
+void MipsAssembler::ReserveJumpTableSpace() {
+ if (!jump_tables_.empty()) {
+ for (JumpTable& table : jump_tables_) {
+ MipsLabel* label = table.GetLabel();
+ Bind(label);
+
+ // Bulk ensure capacity, as this may be large.
+ size_t orig_size = buffer_.Size();
+ size_t required_capacity = orig_size + table.GetSize();
+ if (required_capacity > buffer_.Capacity()) {
+ buffer_.ExtendCapacity(required_capacity);
+ }
+#ifndef NDEBUG
+ buffer_.has_ensured_capacity_ = true;
+#endif
+
+ // Fill the space with dummy data as the data is not final
+ // until the branches have been promoted, and we shouldn't
+ // be moving uninitialized data during branch promotion.
+ for (size_t cnt = table.GetData().size(), i = 0; i < cnt; i++) {
+ buffer_.Emit<uint32_t>(0x1abe1234u);
+ }
+
+#ifndef NDEBUG
+ buffer_.has_ensured_capacity_ = false;
+#endif
+ }
+ }
+}
+
+void MipsAssembler::EmitJumpTables() {
+ if (!jump_tables_.empty()) {
+ CHECK(!overwriting_);
+ // Switch from appending instructions at the end of the buffer to overwriting
+ // existing instructions (here, jump tables) in the buffer.
+ overwriting_ = true;
+
+ for (JumpTable& table : jump_tables_) {
+ MipsLabel* table_label = table.GetLabel();
+ uint32_t start = GetLabelLocation(table_label);
+ overwrite_location_ = start;
+
+ for (MipsLabel* target : table.GetData()) {
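+ // Each slot must still hold the marker written by ReserveJumpTableSpace().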
+ CHECK_EQ(buffer_.Load<uint32_t>(overwrite_location_), 0x1abe1234u);
+ // The table will contain target addresses relative to the table start.
+ uint32_t offset = GetLabelLocation(target) - start;
+ Emit(offset);
+ }
+ }
+
+ overwriting_ = false;
+ }
+}
+
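
The invariant established here, each entry holding the target's distance from the table start, is what lets the consumer compute a target as table address plus entry. A standalone illustration with plain integers (hypothetical label positions, not ART code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      // Hypothetical label locations (byte offsets within a code buffer).
      const uint32_t table_start = 0x100;
      const std::vector<uint32_t> case_targets = {0x140, 0x180, 0x1C0};

      std::vector<uint32_t> table;
      for (uint32_t target : case_targets) {
        table.push_back(target - table_start);  // What EmitJumpTables() writes.
      }

      // Dispatch: load the table address (LoadLabelAddress), index it, add.
      for (size_t i = 0; i != table.size(); ++i) {
        assert(table_start + table[i] == case_targets[i]);
      }
      return 0;
    }
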
void MipsAssembler::PromoteBranches() {
// Promote short branches to long as necessary.
bool changed;
@@ -2539,12 +2651,16 @@ const MipsAssembler::Branch::BranchInfo MipsAssembler::Branch::branch_info_[] =
{ 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kUncondBranch
{ 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kCondBranch
{ 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kCall
+ // R2 near label.
+ { 1, 0, 0, MipsAssembler::Branch::kOffset16, 0 }, // kLabel
// R2 near literal.
{ 1, 0, 0, MipsAssembler::Branch::kOffset16, 0 }, // kLiteral
// R2 long branches.
{ 9, 3, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongUncondBranch
{ 10, 4, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongCondBranch
{ 6, 1, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongCall
+ // R2 far label.
+ { 3, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kFarLabel
// R2 far literal.
{ 3, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kFarLiteral
// R6 short branches.
@@ -2552,12 +2668,16 @@ const MipsAssembler::Branch::BranchInfo MipsAssembler::Branch::branch_info_[] =
{ 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kR6CondBranch
// Exception: kOffset23 for beqzc/bnezc.
{ 1, 0, 1, MipsAssembler::Branch::kOffset28, 2 }, // kR6Call
+ // R6 near label.
+ { 1, 0, 0, MipsAssembler::Branch::kOffset21, 2 }, // kR6Label
// R6 near literal.
{ 1, 0, 0, MipsAssembler::Branch::kOffset21, 2 }, // kR6Literal
// R6 long branches.
{ 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongUncondBranch
{ 3, 1, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongCondBranch
{ 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongCall
+ // R6 far label.
+ { 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6FarLabel
// R6 far literal.
{ 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6FarLiteral
};
@@ -2614,6 +2734,12 @@ void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) {
Emit(delayed_instruction);
break;
+ // R2 near label.
+ case Branch::kLabel:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Addiu(lhs, rhs, offset);
+ break;
// R2 near literal.
case Branch::kLiteral:
DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
@@ -2691,6 +2817,14 @@ void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) {
Nop();
break;
+ // R2 far label.
+ case Branch::kFarLabel:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lui(AT, High16Bits(offset));
+ Ori(AT, AT, Low16Bits(offset));
+ Addu(lhs, AT, rhs);
+ break;
// R2 far literal.
case Branch::kFarLiteral:
DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
@@ -2725,6 +2859,12 @@ void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) {
Balc(offset);
break;
+ // R6 near label.
+ case Branch::kR6Label:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Addiupc(lhs, offset);
+ break;
// R6 near literal.
case Branch::kR6Literal:
DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
@@ -2759,6 +2899,14 @@ void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) {
Jialc(AT, Low16Bits(offset));
break;
+ // R6 far label.
+ case Branch::kR6FarLabel:
+ DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
+ offset += (offset & 0x8000) << 1; // Account for sign extension in addiu.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Addiu(lhs, AT, Low16Bits(offset));
+ break;
// R6 far literal.
case Branch::kR6FarLiteral:
DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
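
The `offset += (offset & 0x8000) << 1` adjustment above pre-compensates for the sign extension that `addiu` applies to the low half: when bit 15 of the offset is set, the high half is bumped by one so the pair still reconstructs the original value. A standalone check of the arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t offset = 0x00018000;  // Low16 is 0x8000, which addiu sign-extends.
      uint32_t adjusted = offset + ((offset & 0x8000) << 1);    // 0x00028000.
      uint16_t hi = static_cast<uint16_t>(adjusted >> 16);      // 0x0002.
      int16_t lo = static_cast<int16_t>(adjusted & 0xFFFF);     // -0x8000.
      // auipc-style reconstruction: (hi << 16) + sign_extend(lo) == original offset.
      assert((static_cast<int32_t>(hi) << 16) + lo == static_cast<int32_t>(offset));
      return 0;
    }
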
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index d50c439418..e1255f7f23 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -126,6 +126,36 @@ class Literal {
DISALLOW_COPY_AND_ASSIGN(Literal);
};
+// Jump table: table of labels emitted after the literals. Similar to literals.
+class JumpTable {
+ public:
+ explicit JumpTable(std::vector<MipsLabel*>&& labels)
+ : label_(), labels_(std::move(labels)) {
+ }
+
+ uint32_t GetSize() const {
+ return static_cast<uint32_t>(labels_.size()) * sizeof(uint32_t);
+ }
+
+ const std::vector<MipsLabel*>& GetData() const {
+ return labels_;
+ }
+
+ MipsLabel* GetLabel() {
+ return &label_;
+ }
+
+ const MipsLabel* GetLabel() const {
+ return &label_;
+ }
+
+ private:
+ MipsLabel label_;
+ std::vector<MipsLabel*> labels_;
+
+ DISALLOW_COPY_AND_ASSIGN(JumpTable);
+};
+
// Slowpath entered when Thread::Current()->_exception is non-null.
class MipsExceptionSlowPath {
public:
@@ -158,6 +188,7 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
ds_fsm_state_(kExpectingLabel),
ds_fsm_target_pc_(0),
literals_(arena->Adapter(kArenaAllocAssembler)),
+ jump_tables_(arena->Adapter(kArenaAllocAssembler)),
last_position_adjustment_(0),
last_old_position_(0),
last_branch_id_(0),
@@ -465,46 +496,61 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
public:
template <typename ImplicitNullChecker = NoImplicitNullChecker>
- void StoreConst32ToOffset(int32_t value,
- Register base,
- int32_t offset,
- Register temp,
- ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ void StoreConstToOffset(StoreOperandType type,
+ int64_t value,
+ Register base,
+ int32_t offset,
+ Register temp,
+ ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
+ // We permit `base` and `temp` to coincide (however, we check that neither is AT),
+ // in which case the `base` register may be overwritten in the process.
CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ false);
- if (value == 0) {
- temp = ZERO;
- } else {
- LoadConst32(temp, value);
- }
- Sw(temp, base, offset);
- null_checker();
- }
-
- template <typename ImplicitNullChecker = NoImplicitNullChecker>
- void StoreConst64ToOffset(int64_t value,
- Register base,
- int32_t offset,
- Register temp,
- ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
- CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base.
- AdjustBaseAndOffset(base, offset, /* is_doubleword */ true);
+ AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword));
uint32_t low = Low32Bits(value);
uint32_t high = High32Bits(value);
+ Register reg;
+ // If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp`
+ // to load and hold the value but we can use AT instead as AT hasn't been used yet.
+ // Otherwise, `temp` can be used for the value. And if `temp` is the same as the
+ // original `base` (that is, `base` prior to the adjustment), the original `base`
+ // register will be overwritten.
+ if (base == temp) {
+ temp = AT;
+ }
if (low == 0) {
- Sw(ZERO, base, offset);
+ reg = ZERO;
} else {
- LoadConst32(temp, low);
- Sw(temp, base, offset);
+ reg = temp;
+ LoadConst32(reg, low);
}
- null_checker();
- if (high == 0) {
- Sw(ZERO, base, offset + kMipsWordSize);
- } else {
- if (high != low) {
- LoadConst32(temp, high);
- }
- Sw(temp, base, offset + kMipsWordSize);
+ switch (type) {
+ case kStoreByte:
+ Sb(reg, base, offset);
+ break;
+ case kStoreHalfword:
+ Sh(reg, base, offset);
+ break;
+ case kStoreWord:
+ Sw(reg, base, offset);
+ break;
+ case kStoreDoubleword:
+ Sw(reg, base, offset);
+ null_checker();
+ if (high == 0) {
+ reg = ZERO;
+ } else {
+ reg = temp;
+ if (high != low) {
+ LoadConst32(reg, high);
+ }
+ }
+ Sw(reg, base, offset + kMipsWordSize);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+ if (type != kStoreDoubleword) {
+ null_checker();
}
}
@@ -685,6 +731,11 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value));
}
+ // Load label address using the base register (for R2 only) or using PC-relative loads
+ // (for R6 only; base_reg must be ZERO). To be used with data labels in the literal /
+ // jump table area only and not with regular code labels.
+ void LoadLabelAddress(Register dest_reg, Register base_reg, MipsLabel* label);
+
// Create a new literal with the given data.
Literal* NewLiteral(size_t size, const uint8_t* data);
@@ -692,6 +743,12 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
// (for R6 only; base_reg must be ZERO).
void LoadLiteral(Register dest_reg, Register base_reg, Literal* literal);
+ // Create a jump table for the given labels that will be emitted when finalizing.
+ // When the table is emitted, offsets will be relative to the location of the table.
+ // The table location is determined by the location of its label (the label precedes
+ // the table data) and should be loaded using LoadLabelAddress().
+ JumpTable* CreateJumpTable(std::vector<MipsLabel*>&& labels);
+
//
// Overridden common assembler high-level functionality.
//
@@ -935,24 +992,32 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
kUncondBranch,
kCondBranch,
kCall,
+ // R2 near label.
+ kLabel,
// R2 near literal.
kLiteral,
// R2 long branches.
kLongUncondBranch,
kLongCondBranch,
kLongCall,
+ // R2 far label.
+ kFarLabel,
// R2 far literal.
kFarLiteral,
// R6 short branches.
kR6UncondBranch,
kR6CondBranch,
kR6Call,
+ // R6 near label.
+ kR6Label,
// R6 near literal.
kR6Literal,
// R6 long branches.
kR6LongUncondBranch,
kR6LongCondBranch,
kR6LongCall,
+ // R6 far label.
+ kR6FarLabel,
// R6 far literal.
kR6FarLiteral,
};
@@ -1009,8 +1074,12 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
BranchCondition condition,
Register lhs_reg,
Register rhs_reg);
- // Literal.
- Branch(bool is_r6, uint32_t location, Register dest_reg, Register base_reg);
+ // Label address (in literal area) or literal.
+ Branch(bool is_r6,
+ uint32_t location,
+ Register dest_reg,
+ Register base_reg,
+ Type label_or_literal_type);
// Some conditional branches with lhs = rhs are effectively NOPs, while some
// others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs.
@@ -1105,7 +1174,7 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
private:
// Completes branch construction by determining and recording its type.
- void InitializeType(bool is_call, bool is_literal, bool is_r6);
+ void InitializeType(Type initial_type, bool is_r6);
// Helper for the above.
void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);
@@ -1178,6 +1247,8 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
uint32_t GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Branch* branch) const;
void EmitLiterals();
+ void ReserveJumpTableSpace();
+ void EmitJumpTables();
void PromoteBranches();
void EmitBranch(Branch* branch);
void EmitBranches();
@@ -1227,6 +1298,9 @@ class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSi
// without invalidating pointers and references to existing elements.
ArenaDeque<Literal> literals_;
+ // Jump table list.
+ ArenaDeque<JumpTable> jump_tables_;
+
// There's no PC-relative addressing on MIPS32R2. So, in order to access literals relative to PC
// we get PC using the NAL instruction. This label marks the position within the assembler buffer
// that PC (from NAL) points to.
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
index fabb0962fb..750a94df02 100644
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ b/compiler/utils/mips/assembler_mips32r6_test.cc
@@ -309,6 +309,12 @@ TEST_F(AssemblerMIPS32r6Test, Lwpc) {
DriverStr(RepeatRIb(&mips::MipsAssembler::Lwpc, 19, code), "Lwpc");
}
+TEST_F(AssemblerMIPS32r6Test, Addiupc) {
+ // The comment from the Lwpc() test applies to this Addiupc() test as well.
+ const char* code = ".set imm, {imm}\naddiupc ${reg}, (imm - ((imm & 0x40000) << 1)) << 2";
+ DriverStr(RepeatRIb(&mips::MipsAssembler::Addiupc, 19, code), "Addiupc");
+}
+
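
The expression in the expected string undoes the two's-complement encoding of the 19-bit `addiupc` immediate (bit 18 is the sign) and scales instruction words to bytes. A standalone check (helper name is mine):

    #include <cassert>
    #include <cstdint>

    constexpr int32_t Addiupc19ToBytes(uint32_t imm) {
      return static_cast<int32_t>((imm - ((imm & 0x40000) << 1)) << 2);
    }

    int main() {
      assert(Addiupc19ToBytes(0x00001) == 4);          // Small positive offset.
      assert(Addiupc19ToBytes(0x7FFFF) == -4);         // All-ones field is -1 word.
      assert(Addiupc19ToBytes(0x40000) == -0x100000);  // Most negative 19-bit value.
      return 0;
    }
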
TEST_F(AssemblerMIPS32r6Test, Bitswap) {
DriverStr(RepeatRR(&mips::MipsAssembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
}
@@ -635,6 +641,40 @@ TEST_F(AssemblerMIPS32r6Test, StoreDToOffset) {
DriverStr(expected, "StoreDToOffset");
}
+TEST_F(AssemblerMIPS32r6Test, LoadFarthestNearLabelAddress) {
+ mips::MipsLabel label;
+ __ LoadLabelAddress(mips::V0, mips::ZERO, &label);
+ constexpr size_t kAdduCount = 0x3FFDE;
+ for (size_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ "lapc $v0, 1f\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "1:\n";
+ DriverStr(expected, "LoadFarthestNearLabelAddress");
+}
+
+TEST_F(AssemblerMIPS32r6Test, LoadNearestFarLabelAddress) {
+ mips::MipsLabel label;
+ __ LoadLabelAddress(mips::V0, mips::ZERO, &label);
+ constexpr size_t kAdduCount = 0x3FFDF;
+ for (size_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ "1:\n"
+ "auipc $at, %hi(2f - 1b)\n"
+ "addiu $v0, $at, %lo(2f - 1b)\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n";
+ DriverStr(expected, "LoadNearestFarLabelAddress");
+}
+
TEST_F(AssemblerMIPS32r6Test, LoadFarthestNearLiteral) {
mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
__ LoadLiteral(mips::V0, mips::ZERO, literal);
@@ -811,8 +851,7 @@ TEST_F(AssemblerMIPS32r6Test, LongBranchReorder) {
DriverStr(expected, "LongBeqc");
}
-// TODO: MipsAssembler::Addiupc
-// MipsAssembler::Bc
+// TODO: MipsAssembler::Bc
// MipsAssembler::Jic
// MipsAssembler::Jialc
// MipsAssembler::Bltc
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 708bc3d50d..a9abf2f86e 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -1977,6 +1977,85 @@ TEST_F(AssemblerMIPSTest, StoreDToOffset) {
DriverStr(expected, "StoreDToOffset");
}
+TEST_F(AssemblerMIPSTest, StoreConstToOffset) {
+ __ StoreConstToOffset(mips::kStoreByte, 0xFF, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreHalfword, 0xFFFF, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0x123456789ABCDEF0, mips::A1, +0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreByte, 0, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreHalfword, 0, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0, mips::A1, +0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0x1234567812345678, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0x1234567800000000, mips::A1, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreDoubleword, 0x0000000012345678, mips::A1, +0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreWord, 0, mips::T8, +0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::T8, +0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreWord, 0, mips::A1, -0xFFF0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::A1, +0xFFF0, mips::T8);
+
+ __ StoreConstToOffset(mips::kStoreWord, 0, mips::T8, -0xFFF0, mips::T8);
+ __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::T8, +0xFFF0, mips::T8);
+
+ const char* expected =
+ "ori $t8, $zero, 0xFF\n"
+ "sb $t8, 0($a1)\n"
+ "ori $t8, $zero, 0xFFFF\n"
+ "sh $t8, 0($a1)\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0($a1)\n"
+ "lui $t8, 0x9ABC\n"
+ "ori $t8, $t8, 0xDEF0\n"
+ "sw $t8, 0($a1)\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 4($a1)\n"
+
+ "sb $zero, 0($a1)\n"
+ "sh $zero, 0($a1)\n"
+ "sw $zero, 0($a1)\n"
+ "sw $zero, 0($a1)\n"
+ "sw $zero, 4($a1)\n"
+
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0($a1)\n"
+ "sw $t8, 4($a1)\n"
+ "sw $zero, 0($a1)\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 4($a1)\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0($a1)\n"
+ "sw $zero, 4($a1)\n"
+
+ "sw $zero, 0($t8)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, $at, 0x5678\n"
+ "sw $at, 0($t8)\n"
+
+ "addiu $at, $a1, -0x7FF8\n"
+ "sw $zero, -0x7FF8($at)\n"
+ "addiu $at, $a1, 0x7FF8\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0x7FF8($at)\n"
+
+ "addiu $at, $t8, -0x7FF8\n"
+ "sw $zero, -0x7FF8($at)\n"
+ "addiu $at, $t8, 0x7FF8\n"
+ "lui $t8, 0x1234\n"
+ "ori $t8, $t8, 0x5678\n"
+ "sw $t8, 0x7FF8($at)\n";
+ DriverStr(expected, "StoreConstToOffset");
+}
+
TEST_F(AssemblerMIPSTest, B) {
mips::MipsLabel label1, label2;
__ B(&label1);
@@ -2307,6 +2386,44 @@ TEST_F(AssemblerMIPSTest, LoadConst32) {
DriverStr(expected, "LoadConst32");
}
+TEST_F(AssemblerMIPSTest, LoadFarthestNearLabelAddress) {
+ mips::MipsLabel label;
+ __ BindPcRelBaseLabel();
+ __ LoadLabelAddress(mips::V0, mips::V1, &label);
+ constexpr size_t kAddiuCount = 0x1FDE;
+ for (size_t i = 0; i != kAddiuCount; ++i) {
+ __ Addiu(mips::A0, mips::A1, 0);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ "1:\n"
+ "addiu $v0, $v1, %lo(2f - 1b)\n" +
+ RepeatInsn(kAddiuCount, "addiu $a0, $a1, 0\n") +
+ "2:\n";
+ DriverStr(expected, "LoadFarthestNearLabelAddress");
+}
+
+TEST_F(AssemblerMIPSTest, LoadNearestFarLabelAddress) {
+ mips::MipsLabel label;
+ __ BindPcRelBaseLabel();
+ __ LoadLabelAddress(mips::V0, mips::V1, &label);
+ constexpr size_t kAdduCount = 0x1FDF;
+ for (size_t i = 0; i != kAdduCount; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+
+ std::string expected =
+ "1:\n"
+ "lui $at, %hi(2f - 1b)\n"
+ "ori $at, $at, %lo(2f - 1b)\n"
+ "addu $v0, $at, $v1\n" +
+ RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
+ "2:\n";
+ DriverStr(expected, "LoadNearestFarLabelAddress");
+}
+
TEST_F(AssemblerMIPSTest, LoadFarthestNearLiteral) {
mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
__ BindPcRelBaseLabel();
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index bf06675d72..9600907278 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -114,7 +114,8 @@ class SwapAllocator<void> {
explicit SwapAllocator(SwapSpace* swap_space) : swap_space_(swap_space) {}
template <typename U>
- SwapAllocator(const SwapAllocator<U>& other) : swap_space_(other.swap_space_) {}
+ SwapAllocator(const SwapAllocator<U>& other) // NOLINT, implicit
+ : swap_space_(other.swap_space_) {}
SwapAllocator(const SwapAllocator& other) = default;
SwapAllocator& operator=(const SwapAllocator& other) = default;
@@ -149,7 +150,8 @@ class SwapAllocator {
explicit SwapAllocator(SwapSpace* swap_space) : swap_space_(swap_space) {}
template <typename U>
- SwapAllocator(const SwapAllocator<U>& other) : swap_space_(other.swap_space_) {}
+ SwapAllocator(const SwapAllocator<U>& other) // NOLINT, implicit
+ : swap_space_(other.swap_space_) {}
SwapAllocator(const SwapAllocator& other) = default;
SwapAllocator& operator=(const SwapAllocator& other) = default;
diff --git a/compiler/utils/transform_array_ref.h b/compiler/utils/transform_array_ref.h
deleted file mode 100644
index a6da34fb40..0000000000
--- a/compiler/utils/transform_array_ref.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_TRANSFORM_ARRAY_REF_H_
-#define ART_COMPILER_UTILS_TRANSFORM_ARRAY_REF_H_
-
-#include <type_traits>
-
-#include "utils/array_ref.h"
-#include "utils/transform_iterator.h"
-
-namespace art {
-
-/**
- * @brief An ArrayRef<> wrapper that uses a transformation function for element access.
- */
-template <typename BaseType, typename Function>
-class TransformArrayRef {
- private:
- using Iter = TransformIterator<typename ArrayRef<BaseType>::iterator, Function>;
-
- // The Function may take a non-const reference, so const_iterator may not exist.
- using FallbackConstIter = std::iterator<std::random_access_iterator_tag, void, void, void, void>;
- using PreferredConstIter =
- TransformIterator<typename ArrayRef<BaseType>::const_iterator, Function>;
- template <typename F, typename = typename std::result_of<F(const BaseType&)>::type>
- static PreferredConstIter ConstIterHelper(int&);
- template <typename F>
- static FallbackConstIter ConstIterHelper(const int&);
-
- using ConstIter = decltype(ConstIterHelper<Function>(*reinterpret_cast<int*>(0)));
-
- public:
- using value_type = typename Iter::value_type;
- using reference = typename Iter::reference;
- using const_reference = typename ConstIter::reference;
- using pointer = typename Iter::pointer;
- using const_pointer = typename ConstIter::pointer;
- using iterator = Iter;
- using const_iterator = typename std::conditional<
- std::is_same<ConstIter, FallbackConstIter>::value,
- void,
- ConstIter>::type;
- using reverse_iterator = std::reverse_iterator<Iter>;
- using const_reverse_iterator = typename std::conditional<
- std::is_same<ConstIter, FallbackConstIter>::value,
- void,
- std::reverse_iterator<ConstIter>>::type;
- using difference_type = typename ArrayRef<BaseType>::difference_type;
- using size_type = typename ArrayRef<BaseType>::size_type;
-
- // Constructors.
-
- TransformArrayRef(const TransformArrayRef& other) = default;
-
- template <typename OtherBT>
- TransformArrayRef(const ArrayRef<OtherBT>& base, Function fn)
- : data_(base, fn) { }
-
- template <typename OtherBT,
- typename = typename std::enable_if<std::is_same<BaseType, const OtherBT>::value>::type>
- TransformArrayRef(const TransformArrayRef<OtherBT, Function>& other)
- : TransformArrayRef(other.base(), other.GetFunction()) { }
-
- // Assignment operators.
-
- TransformArrayRef& operator=(const TransformArrayRef& other) = default;
-
- template <typename OtherBT,
- typename = typename std::enable_if<std::is_same<BaseType, const OtherBT>::value>::type>
- TransformArrayRef& operator=(const TransformArrayRef<OtherBT, Function>& other) {
- return *this = TransformArrayRef(other.base(), other.GetFunction());
- }
-
- // Destructor.
- ~TransformArrayRef() = default;
-
- // Iterators.
- iterator begin() { return MakeIterator(base().begin()); }
- const_iterator begin() const { return MakeIterator(base().cbegin()); }
- const_iterator cbegin() const { return MakeIterator(base().cbegin()); }
- iterator end() { return MakeIterator(base().end()); }
- const_iterator end() const { MakeIterator(base().cend()); }
- const_iterator cend() const { return MakeIterator(base().cend()); }
- reverse_iterator rbegin() { return reverse_iterator(end()); }
- const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
- const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); }
- reverse_iterator rend() { return reverse_iterator(begin()); }
- const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
- const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); }
-
- // Size.
- size_type size() const { return base().size(); }
- bool empty() const { return base().empty(); }
-
- // Element access. NOTE: Not providing data().
-
- reference operator[](size_type n) { return GetFunction()(base()[n]); }
- const_reference operator[](size_type n) const { return GetFunction()(base()[n]); }
-
- reference front() { return GetFunction()(base().front()); }
- const_reference front() const { return GetFunction()(base().front()); }
-
- reference back() { return GetFunction()(base().back()); }
- const_reference back() const { return GetFunction()(base().back()); }
-
- TransformArrayRef SubArray(size_type pos) {
- return TransformArrayRef(base().SubArray(pos), GetFunction());
- }
- TransformArrayRef<const BaseType, Function> SubArray(size_type pos) const {
- return TransformArrayRef<const BaseType, Function>(base().SubArray(pos), GetFunction());
- }
- TransformArrayRef<const BaseType, Function> SubArray(size_type pos, size_type length) const {
- return TransformArrayRef<const BaseType, Function>(base().SubArray(pos, length), GetFunction());
- }
-
- // Retrieve the base ArrayRef<>.
- ArrayRef<BaseType> base() {
- return data_.base_;
- }
- ArrayRef<const BaseType> base() const {
- return ArrayRef<const BaseType>(data_.base_);
- }
-
- private:
- // Allow EBO for state-less Function.
- struct Data : Function {
- public:
- Data(ArrayRef<BaseType> base, Function fn) : Function(fn), base_(base) { }
-
- ArrayRef<BaseType> base_;
- };
-
- const Function& GetFunction() const {
- return static_cast<const Function&>(data_);
- }
-
- template <typename BaseIterator>
- auto MakeIterator(BaseIterator base) const {
- return MakeTransformIterator(base, GetFunction());
- }
-
- Data data_;
-
- template <typename OtherBT, typename OtherFunction>
- friend class TransformArrayRef;
-};
-
-template <typename BaseType, typename Function>
-bool operator==(const TransformArrayRef<BaseType, Function>& lhs,
- const TransformArrayRef<BaseType, Function>& rhs) {
- return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin());
-}
-
-template <typename BaseType, typename Function>
-bool operator!=(const TransformArrayRef<BaseType, Function>& lhs,
- const TransformArrayRef<BaseType, Function>& rhs) {
- return !(lhs == rhs);
-}
-
-template <typename ValueType, typename Function>
-TransformArrayRef<ValueType, Function> MakeTransformArrayRef(
- ArrayRef<ValueType> container, Function f) {
- return TransformArrayRef<ValueType, Function>(container, f);
-}
-
-template <typename Container, typename Function>
-TransformArrayRef<typename Container::value_type, Function> MakeTransformArrayRef(
- Container& container, Function f) {
- return TransformArrayRef<typename Container::value_type, Function>(
- ArrayRef<typename Container::value_type>(container.data(), container.size()), f);
-}
-
-template <typename Container, typename Function>
-TransformArrayRef<const typename Container::value_type, Function> MakeTransformArrayRef(
- const Container& container, Function f) {
- return TransformArrayRef<const typename Container::value_type, Function>(
- ArrayRef<const typename Container::value_type>(container.data(), container.size()), f);
-}
-
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_TRANSFORM_ARRAY_REF_H_
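For context, the wrapper deleted above is typically consumed through MakeTransformArrayRef(). Below is a minimal usage sketch, assuming the utility is being relocated under the base/ include root rather than dropped (the assembler hunks at the end of this diff swap "utils/array_ref.h" for "base/array_ref.h"); the header path, the Item struct, and Demo() are illustrative assumptions, not part of this change:

    #include <cassert>
    #include <vector>

    #include "base/transform_array_ref.h"  // Assumed post-move location; formerly under utils/.

    struct Item { int id; };

    void Demo() {
      std::vector<Item> items({ {7}, {11} });
      // A non-owning view over 'items' that applies the lambda on every element access.
      auto ids = art::MakeTransformArrayRef(items, [](const Item& item) { return item.id; });
      assert(ids.size() == 2u);
      assert(ids[0] == 7 && ids[1] == 11);
    }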
diff --git a/compiler/utils/transform_array_ref_test.cc b/compiler/utils/transform_array_ref_test.cc
deleted file mode 100644
index 8d71fd7179..0000000000
--- a/compiler/utils/transform_array_ref_test.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <vector>
-
-#include "gtest/gtest.h"
-
-#include "utils/transform_array_ref.h"
-
-namespace art {
-
-namespace { // anonymous namespace
-
-struct ValueHolder {
- // Deliberately not explicit.
- ValueHolder(int v) : value(v) { } // NOLINT
- int value;
-};
-
-ATTRIBUTE_UNUSED bool operator==(const ValueHolder& lhs, const ValueHolder& rhs) {
- return lhs.value == rhs.value;
-}
-
-} // anonymous namespace
-
-TEST(TransformArrayRef, ConstRefAdd1) {
- auto add1 = [](const ValueHolder& h) { return h.value + 1; }; // NOLINT [readability/braces]
- std::vector<ValueHolder> input({ 7, 6, 4, 0 });
- std::vector<int> output;
-
- auto taref = MakeTransformArrayRef(input, add1);
- using TarefIter = decltype(taref)::iterator;
- using ConstTarefIter = decltype(taref)::const_iterator;
- static_assert(std::is_same<int, decltype(taref)::value_type>::value, "value_type");
- static_assert(std::is_same<TarefIter, decltype(taref)::pointer>::value, "pointer");
- static_assert(std::is_same<int, decltype(taref)::reference>::value, "reference");
- static_assert(std::is_same<ConstTarefIter, decltype(taref)::const_pointer>::value,
- "const_pointer");
- static_assert(std::is_same<int, decltype(taref)::const_reference>::value, "const_reference");
-
- std::copy(taref.begin(), taref.end(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 8, 7, 5, 1 }), output);
- output.clear();
-
- std::copy(taref.cbegin(), taref.cend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 8, 7, 5, 1 }), output);
- output.clear();
-
- std::copy(taref.rbegin(), taref.rend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 5, 7, 8 }), output);
- output.clear();
-
- std::copy(taref.crbegin(), taref.crend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 5, 7, 8 }), output);
- output.clear();
-
- ASSERT_EQ(input.size(), taref.size());
- ASSERT_EQ(input.empty(), taref.empty());
- ASSERT_EQ(input.front().value + 1, taref.front());
- ASSERT_EQ(input.back().value + 1, taref.back());
-
- for (size_t i = 0; i != input.size(); ++i) {
- ASSERT_EQ(input[i].value + 1, taref[i]);
- }
-}
-
-TEST(TransformArrayRef, NonConstRefSub1) {
- auto sub1 = [](ValueHolder& h) { return h.value - 1; }; // NOLINT [readability/braces]
- std::vector<ValueHolder> input({ 4, 4, 5, 7, 10 });
- std::vector<int> output;
-
- auto taref = MakeTransformArrayRef(input, sub1);
- using TarefIter = decltype(taref)::iterator;
- static_assert(std::is_same<void, decltype(taref)::const_iterator>::value, "const_iterator");
- static_assert(std::is_same<int, decltype(taref)::value_type>::value, "value_type");
- static_assert(std::is_same<TarefIter, decltype(taref)::pointer>::value, "pointer");
- static_assert(std::is_same<int, decltype(taref)::reference>::value, "reference");
- static_assert(std::is_same<void, decltype(taref)::const_pointer>::value, "const_pointer");
- static_assert(std::is_same<void, decltype(taref)::const_reference>::value, "const_reference");
-
- std::copy(taref.begin(), taref.end(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 3, 3, 4, 6, 9 }), output);
- output.clear();
-
- std::copy(taref.rbegin(), taref.rend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 9, 6, 4, 3, 3 }), output);
- output.clear();
-
- ASSERT_EQ(input.size(), taref.size());
- ASSERT_EQ(input.empty(), taref.empty());
- ASSERT_EQ(input.front().value - 1, taref.front());
- ASSERT_EQ(input.back().value - 1, taref.back());
-
- for (size_t i = 0; i != input.size(); ++i) {
- ASSERT_EQ(input[i].value - 1, taref[i]);
- }
-}
-
-TEST(TransformArrayRef, ConstAndNonConstRef) {
- struct Ref {
- int& operator()(ValueHolder& h) const { return h.value; }
- const int& operator()(const ValueHolder& h) const { return h.value; }
- };
- Ref ref;
- std::vector<ValueHolder> input({ 1, 0, 1, 0, 3, 1 });
- std::vector<int> output;
-
- auto taref = MakeTransformArrayRef(input, ref);
- static_assert(std::is_same<int, decltype(taref)::value_type>::value, "value_type");
- static_assert(std::is_same<int*, decltype(taref)::pointer>::value, "pointer");
- static_assert(std::is_same<int&, decltype(taref)::reference>::value, "reference");
- static_assert(std::is_same<const int*, decltype(taref)::const_pointer>::value, "const_pointer");
- static_assert(std::is_same<const int&, decltype(taref)::const_reference>::value,
- "const_reference");
-
- std::copy(taref.begin(), taref.end(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 0, 1, 0, 3, 1 }), output);
- output.clear();
-
- std::copy(taref.cbegin(), taref.cend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 0, 1, 0, 3, 1 }), output);
- output.clear();
-
- std::copy(taref.rbegin(), taref.rend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 3, 0, 1, 0, 1 }), output);
- output.clear();
-
- std::copy(taref.crbegin(), taref.crend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 3, 0, 1, 0, 1 }), output);
- output.clear();
-
- ASSERT_EQ(input.size(), taref.size());
- ASSERT_EQ(input.empty(), taref.empty());
- ASSERT_EQ(input.front().value, taref.front());
- ASSERT_EQ(input.back().value, taref.back());
-
- for (size_t i = 0; i != input.size(); ++i) {
- ASSERT_EQ(input[i].value, taref[i]);
- }
-
- // Test writing through the transform iterator.
- std::vector<int> transform_input({ 24, 37, 11, 71 });
- std::vector<ValueHolder> transformed(transform_input.size(), 0);
- taref = MakeTransformArrayRef(transformed, ref);
- for (size_t i = 0; i != transform_input.size(); ++i) {
- taref[i] = transform_input[i];
- }
- ASSERT_EQ(std::vector<ValueHolder>({ 24, 37, 11, 71 }), transformed);
-
- const std::vector<ValueHolder>& cinput = input;
-
- auto ctaref = MakeTransformArrayRef(cinput, ref);
- static_assert(std::is_same<int, decltype(ctaref)::value_type>::value, "value_type");
- static_assert(std::is_same<const int*, decltype(ctaref)::pointer>::value, "pointer");
- static_assert(std::is_same<const int&, decltype(ctaref)::reference>::value, "reference");
- static_assert(std::is_same<const int*, decltype(ctaref)::const_pointer>::value, "const_pointer");
- static_assert(std::is_same<const int&, decltype(ctaref)::const_reference>::value,
- "const_reference");
-
- std::copy(ctaref.begin(), ctaref.end(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 0, 1, 0, 3, 1 }), output);
- output.clear();
-
- std::copy(ctaref.cbegin(), ctaref.cend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 0, 1, 0, 3, 1 }), output);
- output.clear();
-
- std::copy(ctaref.rbegin(), ctaref.rend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 3, 0, 1, 0, 1 }), output);
- output.clear();
-
- std::copy(ctaref.crbegin(), ctaref.crend(), std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 3, 0, 1, 0, 1 }), output);
- output.clear();
-
- ASSERT_EQ(cinput.size(), ctaref.size());
- ASSERT_EQ(cinput.empty(), ctaref.empty());
- ASSERT_EQ(cinput.front().value, ctaref.front());
- ASSERT_EQ(cinput.back().value, ctaref.back());
-
- for (size_t i = 0; i != cinput.size(); ++i) {
- ASSERT_EQ(cinput[i].value, ctaref[i]);
- }
-
- // Test conversion adding const.
- decltype(ctaref) ctaref2 = taref;
- ASSERT_EQ(taref.size(), ctaref2.size());
- for (size_t i = 0; i != taref.size(); ++i) {
- ASSERT_EQ(taref[i], ctaref2[i]);
- }
-}
-
-} // namespace art
diff --git a/compiler/utils/transform_iterator.h b/compiler/utils/transform_iterator.h
deleted file mode 100644
index 3bc9046408..0000000000
--- a/compiler/utils/transform_iterator.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_TRANSFORM_ITERATOR_H_
-#define ART_COMPILER_UTILS_TRANSFORM_ITERATOR_H_
-
-#include <iterator>
-#include <type_traits>
-
-#include "base/iteration_range.h"
-
-namespace art {
-
-// The transform iterator transforms values from the base iterator with a given
-// transformation function. It can serve as a replacement for std::transform(), i.e.
-// std::copy(MakeTransformIterator(begin, f), MakeTransformIterator(end, f), out)
-// is equivalent to
-// std::transform(begin, end, out, f)
-// If the function returns an l-value reference or a wrapper that supports assignment,
-// the TransformIterator can also be used as an output iterator, i.e.
-// std::copy(begin, end, MakeTransformIterator(out, f))
-// is equivalent to
-// for (auto it = begin; it != end; ++it) {
-// f(*out++) = *it;
-// }
-template <typename BaseIterator, typename Function>
-class TransformIterator {
- private:
- static_assert(std::is_base_of<
- std::input_iterator_tag,
- typename std::iterator_traits<BaseIterator>::iterator_category>::value,
- "Transform iterator base must be an input iterator.");
-
- using InputType = typename std::iterator_traits<BaseIterator>::reference;
- using ResultType = typename std::result_of<Function(InputType)>::type;
-
- public:
- using iterator_category = typename std::iterator_traits<BaseIterator>::iterator_category;
- using value_type =
- typename std::remove_const<typename std::remove_reference<ResultType>::type>::type;
- using difference_type = typename std::iterator_traits<BaseIterator>::difference_type;
- using pointer = typename std::conditional<
- std::is_reference<ResultType>::value,
- typename std::add_pointer<typename std::remove_reference<ResultType>::type>::type,
- TransformIterator>::type;
- using reference = ResultType;
-
- TransformIterator(BaseIterator base, Function fn)
- : data_(base, fn) { }
-
- template <typename OtherBI>
- TransformIterator(const TransformIterator<OtherBI, Function>& other)
- : data_(other.base(), other.GetFunction()) {
- }
-
- TransformIterator& operator++() {
- ++data_.base_;
- return *this;
- }
-
- TransformIterator operator++(int) {
- TransformIterator tmp(*this);
- ++*this;
- return tmp;
- }
-
- TransformIterator& operator--() {
- static_assert(
- std::is_base_of<std::bidirectional_iterator_tag,
- typename std::iterator_traits<BaseIterator>::iterator_category>::value,
- "BaseIterator must be bidirectional iterator to use operator--()");
- --data_.base_;
- return *this;
- }
-
- TransformIterator operator--(int) {
- TransformIterator tmp(*this);
- --*this;
- return tmp;
- }
-
- reference operator*() const {
- return GetFunction()(*base());
- }
-
- reference operator[](difference_type n) const {
- static_assert(
- std::is_base_of<std::random_access_iterator_tag,
- typename std::iterator_traits<BaseIterator>::iterator_category>::value,
- "BaseIterator must be random access iterator to use operator[]");
- return GetFunction()(base()[n]);
- }
-
- TransformIterator operator+(difference_type n) const {
- static_assert(
- std::is_base_of<std::random_access_iterator_tag,
- typename std::iterator_traits<BaseIterator>::iterator_category>::value,
- "BaseIterator must be random access iterator to use operator+");
- return TransformIterator(base() + n, GetFunction());
- }
-
- TransformIterator operator-(difference_type n) const {
- static_assert(
- std::is_base_of<std::random_access_iterator_tag,
- typename std::iterator_traits<BaseIterator>::iterator_category>::value,
- "BaseIterator must be random access iterator to use operator-");
- return TransformIterator(base() - n, GetFunction());
- }
-
- difference_type operator-(const TransformIterator& other) const {
- static_assert(
- std::is_base_of<std::random_access_iterator_tag,
- typename std::iterator_traits<BaseIterator>::iterator_category>::value,
- "BaseIterator must be random access iterator to use operator-");
- return base() - other.base();
- }
-
- // Retrieve the base iterator.
- BaseIterator base() const {
- return data_.base_;
- }
-
- // Retrieve the transformation function.
- const Function& GetFunction() const {
- return static_cast<const Function&>(data_);
- }
-
- private:
- // Allow EBO for state-less Function.
- struct Data : Function {
- public:
- Data(BaseIterator base, Function fn) : Function(fn), base_(base) { }
-
- BaseIterator base_;
- };
-
- Data data_;
-};
-
-template <typename BaseIterator1, typename BaseIterator2, typename Function>
-bool operator==(const TransformIterator<BaseIterator1, Function>& lhs,
- const TransformIterator<BaseIterator2, Function>& rhs) {
- return lhs.base() == rhs.base();
-}
-
-template <typename BaseIterator1, typename BaseIterator2, typename Function>
-bool operator!=(const TransformIterator<BaseIterator1, Function>& lhs,
- const TransformIterator<BaseIterator2, Function>& rhs) {
- return !(lhs == rhs);
-}
-
-template <typename BaseIterator, typename Function>
-TransformIterator<BaseIterator, Function> MakeTransformIterator(BaseIterator base, Function f) {
- return TransformIterator<BaseIterator, Function>(base, f);
-}
-
-template <typename BaseRange, typename Function>
-auto MakeTransformRange(BaseRange& range, Function f) {
- return MakeIterationRange(MakeTransformIterator(range.begin(), f),
- MakeTransformIterator(range.end(), f));
-}
-
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_TRANSFORM_ITERATOR_H_
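The equivalences described in the header comment above can be made concrete. A minimal sketch follows, under the same assumed base/ relocation; the header path and the Entry/get/ref/Demo names are illustrative only:

    #include <algorithm>
    #include <iterator>
    #include <vector>

    #include "base/transform_iterator.h"  // Assumed post-move location; formerly under utils/.

    struct Entry { int value; };

    void Demo() {
      std::vector<Entry> entries({ {4}, {5}, {6} });
      std::vector<int> out;

      // Reading: equivalent to
      // std::transform(entries.begin(), entries.end(), std::back_inserter(out), get).
      auto get = [](const Entry& e) { return e.value; };
      std::copy(art::MakeTransformIterator(entries.begin(), get),
                art::MakeTransformIterator(entries.end(), get),
                std::back_inserter(out));

      // Writing: a function returning an l-value reference makes the transform
      // iterator usable as an output iterator as well.
      auto ref = [](Entry& e) -> int& { return e.value; };
      std::vector<int> src({ 1, 2, 3 });
      std::copy(src.begin(), src.end(), art::MakeTransformIterator(entries.begin(), ref));

      // The 'Data : Function' base-class trick enables the empty base optimization,
      // so a capture-less functor adds no size over the base iterator (EBO is
      // universal in practice here, though not strictly mandated by the standard).
      static_assert(sizeof(art::MakeTransformIterator(entries.begin(), get)) ==
                        sizeof(entries.begin()),
                    "EBO expected");
    }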
diff --git a/compiler/utils/transform_iterator_test.cc b/compiler/utils/transform_iterator_test.cc
deleted file mode 100644
index 57ff0a62ac..0000000000
--- a/compiler/utils/transform_iterator_test.cc
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <forward_list>
-#include <list>
-#include <type_traits>
-#include <vector>
-
-#include "gtest/gtest.h"
-
-#include "utils/transform_iterator.h"
-
-namespace art {
-
-namespace { // anonymous namespace
-
-struct ValueHolder {
- // Deliberately not explicit.
- ValueHolder(int v) : value(v) { } // NOLINT
- int value;
-};
-
-bool operator==(const ValueHolder& lhs, const ValueHolder& rhs) {
- return lhs.value == rhs.value;
-}
-
-} // anonymous namespace
-
-TEST(TransformIterator, VectorAdd1) {
- auto add1 = [](const ValueHolder& h) { return h.value + 1; }; // NOLINT [readability/braces]
- std::vector<ValueHolder> input({ 1, 7, 3, 8 });
- std::vector<int> output;
-
- using vector_titer = decltype(MakeTransformIterator(input.begin(), add1));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_titer::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_titer::value_type>::value, "value_type");
- static_assert(std::is_same<vector_titer, vector_titer::pointer>::value, "pointer");
- static_assert(std::is_same<int, vector_titer::reference>::value, "reference");
-
- using vector_ctiter = decltype(MakeTransformIterator(input.cbegin(), add1));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_ctiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_ctiter::value_type>::value, "value_type");
- static_assert(std::is_same<vector_ctiter, vector_ctiter::pointer>::value, "pointer");
- static_assert(std::is_same<int, vector_ctiter::reference>::value, "reference");
-
- using vector_rtiter = decltype(MakeTransformIterator(input.rbegin(), add1));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_rtiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_rtiter::value_type>::value, "value_type");
- static_assert(std::is_same<vector_rtiter, vector_rtiter::pointer>::value, "pointer");
- static_assert(std::is_same<int, vector_rtiter::reference>::value, "reference");
-
- using vector_crtiter = decltype(MakeTransformIterator(input.crbegin(), add1));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_crtiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_crtiter::value_type>::value, "value_type");
- static_assert(std::is_same<vector_crtiter, vector_crtiter::pointer>::value, "pointer");
- static_assert(std::is_same<int, vector_crtiter::reference>::value, "reference");
-
- std::copy(MakeTransformIterator(input.begin(), add1),
- MakeTransformIterator(input.end(), add1),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 2, 8, 4, 9 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.cbegin(), add1),
- MakeTransformIterator(input.cend(), add1),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 2, 8, 4, 9 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.rbegin(), add1),
- MakeTransformIterator(input.rend(), add1),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 9, 4, 8, 2 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.crbegin(), add1),
- MakeTransformIterator(input.crend(), add1),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 9, 4, 8, 2 }), output);
- output.clear();
-
- for (size_t i = 0; i != input.size(); ++i) {
- ASSERT_EQ(input[i].value + 1, MakeTransformIterator(input.begin(), add1)[i]);
- ASSERT_EQ(input[i].value + 1, MakeTransformIterator(input.cbegin(), add1)[i]);
- ptrdiff_t index_from_rbegin = static_cast<ptrdiff_t>(input.size() - i - 1u);
- ASSERT_EQ(input[i].value + 1, MakeTransformIterator(input.rbegin(), add1)[index_from_rbegin]);
- ASSERT_EQ(input[i].value + 1, MakeTransformIterator(input.crbegin(), add1)[index_from_rbegin]);
- ptrdiff_t index_from_end = -static_cast<ptrdiff_t>(input.size() - i);
- ASSERT_EQ(input[i].value + 1, MakeTransformIterator(input.end(), add1)[index_from_end]);
- ASSERT_EQ(input[i].value + 1, MakeTransformIterator(input.cend(), add1)[index_from_end]);
- ptrdiff_t index_from_rend = -1 - static_cast<ptrdiff_t>(i);
- ASSERT_EQ(input[i].value + 1, MakeTransformIterator(input.rend(), add1)[index_from_rend]);
- ASSERT_EQ(input[i].value + 1, MakeTransformIterator(input.crend(), add1)[index_from_rend]);
-
- ASSERT_EQ(MakeTransformIterator(input.begin(), add1) + i,
- MakeTransformIterator(input.begin() + i, add1));
- ASSERT_EQ(MakeTransformIterator(input.cbegin(), add1) + i,
- MakeTransformIterator(input.cbegin() + i, add1));
- ASSERT_EQ(MakeTransformIterator(input.rbegin(), add1) + i,
- MakeTransformIterator(input.rbegin() + i, add1));
- ASSERT_EQ(MakeTransformIterator(input.crbegin(), add1) + i,
- MakeTransformIterator(input.crbegin() + i, add1));
- ASSERT_EQ(MakeTransformIterator(input.end(), add1) - i,
- MakeTransformIterator(input.end() - i, add1));
- ASSERT_EQ(MakeTransformIterator(input.cend(), add1) - i,
- MakeTransformIterator(input.cend() - i, add1));
- ASSERT_EQ(MakeTransformIterator(input.rend(), add1) - i,
- MakeTransformIterator(input.rend() - i, add1));
- ASSERT_EQ(MakeTransformIterator(input.crend(), add1) - i,
- MakeTransformIterator(input.crend() - i, add1));
- }
- ASSERT_EQ(input.end(),
- (MakeTransformIterator(input.begin(), add1) + input.size()).base());
- ASSERT_EQ(MakeTransformIterator(input.end(), add1) - MakeTransformIterator(input.begin(), add1),
- static_cast<ptrdiff_t>(input.size()));
-
- // Test iterator->const_iterator conversion and comparison.
- auto it = MakeTransformIterator(input.begin(), add1);
- decltype(MakeTransformIterator(input.cbegin(), add1)) cit = it;
- static_assert(!std::is_same<decltype(it), decltype(cit)>::value, "Types must be different");
- ASSERT_EQ(it, cit);
- auto rit = MakeTransformIterator(input.rbegin(), add1);
- decltype(MakeTransformIterator(input.crbegin(), add1)) crit(rit);
- static_assert(!std::is_same<decltype(rit), decltype(crit)>::value, "Types must be different");
- ASSERT_EQ(rit, crit);
-}
-
-TEST(TransformIterator, ListSub1) {
- auto sub1 = [](const ValueHolder& h) { return h.value - 1; }; // NOLINT [readability/braces]
- std::list<ValueHolder> input({ 2, 3, 5, 7, 11 });
- std::vector<int> output;
-
- using list_titer = decltype(MakeTransformIterator(input.begin(), sub1));
- static_assert(std::is_same<std::bidirectional_iterator_tag,
- list_titer::iterator_category>::value, "category");
- static_assert(std::is_same<int, list_titer::value_type>::value, "value_type");
- static_assert(std::is_same<list_titer, list_titer::pointer>::value, "pointer");
- static_assert(std::is_same<int, list_titer::reference>::value, "reference");
-
- using list_ctiter = decltype(MakeTransformIterator(input.cbegin(), sub1));
- static_assert(std::is_same<std::bidirectional_iterator_tag,
- list_ctiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, list_ctiter::value_type>::value, "value_type");
- static_assert(std::is_same<list_ctiter, list_ctiter::pointer>::value, "pointer");
- static_assert(std::is_same<int, list_ctiter::reference>::value, "reference");
-
- using list_rtiter = decltype(MakeTransformIterator(input.rbegin(), sub1));
- static_assert(std::is_same<std::bidirectional_iterator_tag,
- list_rtiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, list_rtiter::value_type>::value, "value_type");
- static_assert(std::is_same<list_rtiter, list_rtiter::pointer>::value, "pointer");
- static_assert(std::is_same<int, list_rtiter::reference>::value, "reference");
-
- using list_crtiter = decltype(MakeTransformIterator(input.crbegin(), sub1));
- static_assert(std::is_same<std::bidirectional_iterator_tag,
- list_crtiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, list_crtiter::value_type>::value, "value_type");
- static_assert(std::is_same<list_crtiter, list_crtiter::pointer>::value, "pointer");
- static_assert(std::is_same<int, list_crtiter::reference>::value, "reference");
-
- std::copy(MakeTransformIterator(input.begin(), sub1),
- MakeTransformIterator(input.end(), sub1),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 2, 4, 6, 10 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.cbegin(), sub1),
- MakeTransformIterator(input.cend(), sub1),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 1, 2, 4, 6, 10 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.rbegin(), sub1),
- MakeTransformIterator(input.rend(), sub1),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 10, 6, 4, 2, 1 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.crbegin(), sub1),
- MakeTransformIterator(input.crend(), sub1),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 10, 6, 4, 2, 1 }), output);
- output.clear();
-
- // Test iterator->const_iterator conversion and comparison.
- auto it = MakeTransformIterator(input.begin(), sub1);
- decltype(MakeTransformIterator(input.cbegin(), sub1)) cit = it;
- static_assert(!std::is_same<decltype(it), decltype(cit)>::value, "Types must be different");
- ASSERT_EQ(it, cit);
-}
-
-TEST(TransformIterator, ForwardListMul3) {
- auto mul3 = [](const ValueHolder& h) { return h.value * 3; }; // NOLINT [readability/braces]
- std::forward_list<ValueHolder> input({ 1, 1, 2, 3, 5, 8 });
- std::vector<int> output;
-
- using flist_titer = decltype(MakeTransformIterator(input.begin(), mul3));
- static_assert(std::is_same<std::forward_iterator_tag,
- flist_titer::iterator_category>::value, "category");
- static_assert(std::is_same<int, flist_titer::value_type>::value, "value_type");
- static_assert(std::is_same<flist_titer, flist_titer::pointer>::value, "pointer");
- static_assert(std::is_same<int, flist_titer::reference>::value, "reference");
-
- using flist_ctiter = decltype(MakeTransformIterator(input.cbegin(), mul3));
- static_assert(std::is_same<std::forward_iterator_tag,
- flist_ctiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, flist_ctiter::value_type>::value, "value_type");
- static_assert(std::is_same<flist_ctiter, flist_ctiter::pointer>::value, "pointer");
- static_assert(std::is_same<int, flist_ctiter::reference>::value, "reference");
-
- std::copy(MakeTransformIterator(input.begin(), mul3),
- MakeTransformIterator(input.end(), mul3),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 3, 3, 6, 9, 15, 24 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.cbegin(), mul3),
- MakeTransformIterator(input.cend(), mul3),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 3, 3, 6, 9, 15, 24 }), output);
- output.clear();
-
- // Test iterator->const_iterator conversion and comparison.
- auto it = MakeTransformIterator(input.begin(), mul3);
- decltype(MakeTransformIterator(input.cbegin(), mul3)) cit = it;
- static_assert(!std::is_same<decltype(it), decltype(cit)>::value, "Types must be different");
- ASSERT_EQ(it, cit);
-}
-
-TEST(TransformIterator, VectorConstReference) {
- auto ref = [](const ValueHolder& h) -> const int& { return h.value; }; // NOLINT [readability/braces]
- std::vector<ValueHolder> input({ 7, 3, 1, 2, 4, 8 });
- std::vector<int> output;
-
- using vector_titer = decltype(MakeTransformIterator(input.begin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_titer::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_titer::value_type>::value, "value_type");
- static_assert(std::is_same<const int*, vector_titer::pointer>::value, "pointer");
- static_assert(std::is_same<const int&, vector_titer::reference>::value, "reference");
-
- using vector_ctiter = decltype(MakeTransformIterator(input.cbegin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_ctiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_ctiter::value_type>::value, "value_type");
- static_assert(std::is_same<const int*, vector_ctiter::pointer>::value, "pointer");
- static_assert(std::is_same<const int&, vector_ctiter::reference>::value, "reference");
-
- using vector_rtiter = decltype(MakeTransformIterator(input.rbegin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_rtiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_rtiter::value_type>::value, "value_type");
- static_assert(std::is_same<const int*, vector_rtiter::pointer>::value, "pointer");
- static_assert(std::is_same<const int&, vector_rtiter::reference>::value, "reference");
-
- using vector_crtiter = decltype(MakeTransformIterator(input.crbegin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_crtiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_crtiter::value_type>::value, "value_type");
- static_assert(std::is_same<const int*, vector_crtiter::pointer>::value, "pointer");
- static_assert(std::is_same<const int&, vector_crtiter::reference>::value, "reference");
-
- std::copy(MakeTransformIterator(input.begin(), ref),
- MakeTransformIterator(input.end(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 7, 3, 1, 2, 4, 8 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.cbegin(), ref),
- MakeTransformIterator(input.cend(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 7, 3, 1, 2, 4, 8 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.rbegin(), ref),
- MakeTransformIterator(input.rend(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 8, 4, 2, 1, 3, 7 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.crbegin(), ref),
- MakeTransformIterator(input.crend(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 8, 4, 2, 1, 3, 7 }), output);
- output.clear();
-
- for (size_t i = 0; i != input.size(); ++i) {
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.begin(), ref)[i]);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.cbegin(), ref)[i]);
- ptrdiff_t index_from_rbegin = static_cast<ptrdiff_t>(input.size() - i - 1u);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.rbegin(), ref)[index_from_rbegin]);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.crbegin(), ref)[index_from_rbegin]);
- ptrdiff_t index_from_end = -static_cast<ptrdiff_t>(input.size() - i);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.end(), ref)[index_from_end]);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.cend(), ref)[index_from_end]);
- ptrdiff_t index_from_rend = -1 - static_cast<ptrdiff_t>(i);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.rend(), ref)[index_from_rend]);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.crend(), ref)[index_from_rend]);
-
- ASSERT_EQ(MakeTransformIterator(input.begin(), ref) + i,
- MakeTransformIterator(input.begin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.cbegin(), ref) + i,
- MakeTransformIterator(input.cbegin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.rbegin(), ref) + i,
- MakeTransformIterator(input.rbegin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.crbegin(), ref) + i,
- MakeTransformIterator(input.crbegin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.end(), ref) - i,
- MakeTransformIterator(input.end() - i, ref));
- ASSERT_EQ(MakeTransformIterator(input.cend(), ref) - i,
- MakeTransformIterator(input.cend() - i, ref));
- ASSERT_EQ(MakeTransformIterator(input.rend(), ref) - i,
- MakeTransformIterator(input.rend() - i, ref));
- ASSERT_EQ(MakeTransformIterator(input.crend(), ref) - i,
- MakeTransformIterator(input.crend() - i, ref));
- }
- ASSERT_EQ(input.end(),
- (MakeTransformIterator(input.begin(), ref) + input.size()).base());
- ASSERT_EQ(MakeTransformIterator(input.end(), ref) - MakeTransformIterator(input.begin(), ref),
- static_cast<ptrdiff_t>(input.size()));
-}
-
-TEST(TransformIterator, VectorNonConstReference) {
- auto ref = [](ValueHolder& h) -> int& { return h.value; }; // NOLINT [readability/braces]
- std::vector<ValueHolder> input({ 7, 3, 1, 2, 4, 8 });
- std::vector<int> output;
-
- using vector_titer = decltype(MakeTransformIterator(input.begin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_titer::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_titer::value_type>::value, "value_type");
- static_assert(std::is_same<int*, vector_titer::pointer>::value, "pointer");
- static_assert(std::is_same<int&, vector_titer::reference>::value, "reference");
-
- using vector_rtiter = decltype(MakeTransformIterator(input.rbegin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_rtiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_rtiter::value_type>::value, "value_type");
- static_assert(std::is_same<int*, vector_rtiter::pointer>::value, "pointer");
- static_assert(std::is_same<int&, vector_rtiter::reference>::value, "reference");
-
- std::copy(MakeTransformIterator(input.begin(), ref),
- MakeTransformIterator(input.end(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 7, 3, 1, 2, 4, 8 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.rbegin(), ref),
- MakeTransformIterator(input.rend(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 8, 4, 2, 1, 3, 7 }), output);
- output.clear();
-
- for (size_t i = 0; i != input.size(); ++i) {
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.begin(), ref)[i]);
- ptrdiff_t index_from_rbegin = static_cast<ptrdiff_t>(input.size() - i - 1u);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.rbegin(), ref)[index_from_rbegin]);
- ptrdiff_t index_from_end = -static_cast<ptrdiff_t>(input.size() - i);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.end(), ref)[index_from_end]);
- ptrdiff_t index_from_rend = -1 - static_cast<ptrdiff_t>(i);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.rend(), ref)[index_from_rend]);
-
- ASSERT_EQ(MakeTransformIterator(input.begin(), ref) + i,
- MakeTransformIterator(input.begin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.rbegin(), ref) + i,
- MakeTransformIterator(input.rbegin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.end(), ref) - i,
- MakeTransformIterator(input.end() - i, ref));
- ASSERT_EQ(MakeTransformIterator(input.rend(), ref) - i,
- MakeTransformIterator(input.rend() - i, ref));
- }
- ASSERT_EQ(input.end(),
- (MakeTransformIterator(input.begin(), ref) + input.size()).base());
- ASSERT_EQ(MakeTransformIterator(input.end(), ref) - MakeTransformIterator(input.begin(), ref),
- static_cast<ptrdiff_t>(input.size()));
-
- // Test writing through the transform iterator.
- std::list<int> transform_input({ 1, -1, 2, -2, 3, -3 });
- std::vector<ValueHolder> transformed(transform_input.size(), 0);
- std::transform(transform_input.begin(),
- transform_input.end(),
- MakeTransformIterator(transformed.begin(), ref),
- [](int v) { return -2 * v; });
- ASSERT_EQ(std::vector<ValueHolder>({ -2, 2, -4, 4, -6, 6 }), transformed);
-}
-
-TEST(TransformIterator, VectorConstAndNonConstReference) {
- struct Ref {
- int& operator()(ValueHolder& h) const { return h.value; }
- const int& operator()(const ValueHolder& h) const { return h.value; }
- };
- Ref ref;
- std::vector<ValueHolder> input({ 7, 3, 1, 2, 4, 8 });
- std::vector<int> output;
-
- using vector_titer = decltype(MakeTransformIterator(input.begin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_titer::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_titer::value_type>::value, "value_type");
- static_assert(std::is_same<int*, vector_titer::pointer>::value, "pointer");
- static_assert(std::is_same<int&, vector_titer::reference>::value, "reference");
-
- using vector_ctiter = decltype(MakeTransformIterator(input.cbegin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_ctiter::iterator_category>::value, "category");
- // static_assert(std::is_same<int, vector_ctiter::value_type>::value, "value_type");
- static_assert(std::is_same<const int*, vector_ctiter::pointer>::value, "pointer");
- static_assert(std::is_same<const int&, vector_ctiter::reference>::value, "reference");
-
- using vector_rtiter = decltype(MakeTransformIterator(input.rbegin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_rtiter::iterator_category>::value, "category");
- static_assert(std::is_same<int, vector_rtiter::value_type>::value, "value_type");
- static_assert(std::is_same<int*, vector_rtiter::pointer>::value, "pointer");
- static_assert(std::is_same<int&, vector_rtiter::reference>::value, "reference");
-
- using vector_crtiter = decltype(MakeTransformIterator(input.crbegin(), ref));
- static_assert(std::is_same<std::random_access_iterator_tag,
- vector_crtiter::iterator_category>::value, "category");
- // static_assert(std::is_same<int, vector_crtiter::value_type>::value, "value_type");
- static_assert(std::is_same<const int*, vector_crtiter::pointer>::value, "pointer");
- static_assert(std::is_same<const int&, vector_crtiter::reference>::value, "reference");
-
- std::copy(MakeTransformIterator(input.begin(), ref),
- MakeTransformIterator(input.end(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 7, 3, 1, 2, 4, 8 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.cbegin(), ref),
- MakeTransformIterator(input.cend(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 7, 3, 1, 2, 4, 8 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.rbegin(), ref),
- MakeTransformIterator(input.rend(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 8, 4, 2, 1, 3, 7 }), output);
- output.clear();
-
- std::copy(MakeTransformIterator(input.crbegin(), ref),
- MakeTransformIterator(input.crend(), ref),
- std::back_inserter(output));
- ASSERT_EQ(std::vector<int>({ 8, 4, 2, 1, 3, 7 }), output);
- output.clear();
-
- for (size_t i = 0; i != input.size(); ++i) {
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.begin(), ref)[i]);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.cbegin(), ref)[i]);
- ptrdiff_t index_from_rbegin = static_cast<ptrdiff_t>(input.size() - i - 1u);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.rbegin(), ref)[index_from_rbegin]);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.crbegin(), ref)[index_from_rbegin]);
- ptrdiff_t index_from_end = -static_cast<ptrdiff_t>(input.size() - i);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.end(), ref)[index_from_end]);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.cend(), ref)[index_from_end]);
- ptrdiff_t index_from_rend = -1 - static_cast<ptrdiff_t>(i);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.rend(), ref)[index_from_rend]);
- ASSERT_EQ(input[i].value, MakeTransformIterator(input.crend(), ref)[index_from_rend]);
-
- ASSERT_EQ(MakeTransformIterator(input.begin(), ref) + i,
- MakeTransformIterator(input.begin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.cbegin(), ref) + i,
- MakeTransformIterator(input.cbegin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.rbegin(), ref) + i,
- MakeTransformIterator(input.rbegin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.crbegin(), ref) + i,
- MakeTransformIterator(input.crbegin() + i, ref));
- ASSERT_EQ(MakeTransformIterator(input.end(), ref) - i,
- MakeTransformIterator(input.end() - i, ref));
- ASSERT_EQ(MakeTransformIterator(input.cend(), ref) - i,
- MakeTransformIterator(input.cend() - i, ref));
- ASSERT_EQ(MakeTransformIterator(input.rend(), ref) - i,
- MakeTransformIterator(input.rend() - i, ref));
- ASSERT_EQ(MakeTransformIterator(input.crend(), ref) - i,
- MakeTransformIterator(input.crend() - i, ref));
- }
- ASSERT_EQ(input.end(),
- (MakeTransformIterator(input.begin(), ref) + input.size()).base());
- ASSERT_EQ(MakeTransformIterator(input.end(), ref) - MakeTransformIterator(input.begin(), ref),
- static_cast<ptrdiff_t>(input.size()));
-
- // Test iterator->const_iterator conversion and comparison.
- auto it = MakeTransformIterator(input.begin(), ref);
- decltype(MakeTransformIterator(input.cbegin(), ref)) cit = it;
- static_assert(!std::is_same<decltype(it), decltype(cit)>::value, "Types must be different");
- ASSERT_EQ(it, cit);
- auto rit = MakeTransformIterator(input.rbegin(), ref);
- decltype(MakeTransformIterator(input.crbegin(), ref)) crit(rit);
- static_assert(!std::is_same<decltype(rit), decltype(crit)>::value, "Types must be different");
- ASSERT_EQ(rit, crit);
-
- // Test writing through the transform iterator.
- std::list<int> transform_input({ 42, 73, 11, 17 });
- std::vector<ValueHolder> transformed(transform_input.size(), 0);
- std::transform(transform_input.begin(),
- transform_input.end(),
- MakeTransformIterator(transformed.begin(), ref),
- [](int v) { return -v; });
- ASSERT_EQ(std::vector<ValueHolder>({ -42, -73, -11, -17 }), transformed);
-}
-
-TEST(TransformIterator, TransformRange) {
- auto ref = [](ValueHolder& h) -> int& { return h.value; }; // NOLINT [readability/braces]
- std::vector<ValueHolder> data({ 1, 0, 1, 3, 1, 0 });
-
- for (int& v : MakeTransformRange(data, ref)) {
- v += 11;
- }
- ASSERT_EQ(std::vector<ValueHolder>({ 12, 11, 12, 14, 12, 11 }), data);
-}
-
-} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 9738784d45..114986b3e7 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -20,6 +20,7 @@
#include <vector>
#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/enums.h"
#include "base/macros.h"
@@ -27,7 +28,6 @@
#include "globals.h"
#include "managed_register_x86.h"
#include "offsets.h"
-#include "utils/array_ref.h"
#include "utils/assembler.h"
namespace art {
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index 3f07ede865..015584cbc1 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -21,10 +21,10 @@
#include "assembler_x86.h"
#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/enums.h"
#include "base/macros.h"
#include "offsets.h"
-#include "utils/array_ref.h"
#include "utils/jni_macro_assembler.h"
namespace art {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index fdd3aa9317..acad86d161 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -20,13 +20,13 @@
#include <vector>
#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/macros.h"
#include "constants_x86_64.h"
#include "globals.h"
#include "managed_register_x86_64.h"
#include "offsets.h"
-#include "utils/array_ref.h"
#include "utils/assembler.h"
#include "utils/jni_macro_assembler.h"
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index cc4e57c999..9107f3c422 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -21,10 +21,10 @@
#include "assembler_x86_64.h"
#include "base/arena_containers.h"
+#include "base/array_ref.h"
#include "base/enums.h"
#include "base/macros.h"
#include "offsets.h"
-#include "utils/array_ref.h"
#include "utils/assembler.h"
#include "utils/jni_macro_assembler.h"