-rw-r--r--  Android.mk | 2
-rw-r--r--  compiler/debug/elf_debug_loc_writer.h | 5
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc | 1
-rw-r--r--  compiler/driver/compiler_driver_test.cc | 6
-rw-r--r--  compiler/jit/jit_compiler.cc | 10
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc | 335
-rw-r--r--  compiler/optimizing/builder.cc | 80
-rw-r--r--  compiler/optimizing/builder.h | 3
-rw-r--r--  compiler/optimizing/code_generator.cc | 13
-rw-r--r--  compiler/optimizing/code_generator.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 110
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 1
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 198
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 2
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 48
-rw-r--r--  compiler/optimizing/code_generator_mips.h | 1
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 95
-rw-r--r--  compiler/optimizing/code_generator_mips64.h | 2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 143
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 1
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 206
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 1
-rw-r--r--  compiler/optimizing/codegen_test.cc | 38
-rw-r--r--  compiler/optimizing/constant_folding_test.cc | 78
-rw-r--r--  compiler/optimizing/dead_code_elimination_test.cc | 8
-rw-r--r--  compiler/optimizing/dominator_test.cc | 31
-rw-r--r--  compiler/optimizing/find_loops_test.cc | 65
-rw-r--r--  compiler/optimizing/graph_checker.cc | 390
-rw-r--r--  compiler/optimizing/graph_checker.h | 80
-rw-r--r--  compiler/optimizing/graph_checker_test.cc | 33
-rw-r--r--  compiler/optimizing/graph_visualizer.cc | 10
-rw-r--r--  compiler/optimizing/gvn_test.cc | 8
-rw-r--r--  compiler/optimizing/induction_var_analysis_test.cc | 240
-rw-r--r--  compiler/optimizing/induction_var_range_test.cc | 23
-rw-r--r--  compiler/optimizing/inliner.cc | 100
-rw-r--r--  compiler/optimizing/inliner.h | 4
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc | 92
-rw-r--r--  compiler/optimizing/intrinsics.cc | 1
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc | 262
-rw-r--r--  compiler/optimizing/licm_test.cc | 2
-rw-r--r--  compiler/optimizing/linearize_test.cc | 9
-rw-r--r--  compiler/optimizing/live_ranges_test.cc | 25
-rw-r--r--  compiler/optimizing/liveness_test.cc | 7
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 6
-rw-r--r--  compiler/optimizing/nodes.cc | 58
-rw-r--r--  compiler/optimizing/nodes.h | 51
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 133
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h | 29
-rw-r--r--  compiler/optimizing/pretty_printer_test.cc | 80
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc | 22
-rw-r--r--  compiler/optimizing/reference_type_propagation.h | 3
-rw-r--r--  compiler/optimizing/register_allocator.cc | 16
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 23
-rw-r--r--  compiler/optimizing/ssa_builder.cc | 32
-rw-r--r--  compiler/optimizing/ssa_builder.h | 3
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc | 2
-rw-r--r--  compiler/optimizing/ssa_test.cc | 8
-rw-r--r--  compiler/optimizing/suspend_check_test.cc | 32
-rw-r--r--  compiler/utils/x86/assembler_x86.cc | 8
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 1
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc | 15
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 13
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 1
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc | 31
-rw-r--r--  disassembler/disassembler_arm.cc | 23
-rw-r--r--  patchoat/patchoat.cc | 599
-rw-r--r--  patchoat/patchoat.h | 10
-rw-r--r--  runtime/Android.mk | 16
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc | 19
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 10
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 2
-rw-r--r--  runtime/art_method-inl.h | 37
-rw-r--r--  runtime/base/scoped_arena_allocator.cc | 2
-rw-r--r--  runtime/class_linker.cc | 66
-rw-r--r--  runtime/class_linker.h | 3
-rw-r--r--  runtime/dex_file_verifier.cc | 154
-rw-r--r--  runtime/dex_file_verifier.h | 7
-rw-r--r--  runtime/dex_file_verifier_test.cc | 18
-rw-r--r--  runtime/dex_instruction_utils.h | 10
-rw-r--r--  runtime/gc/space/image_space.cc | 9
-rw-r--r--  runtime/interpreter/interpreter.cc | 8
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 23
-rw-r--r--  runtime/interpreter/mterp/arm/binopWide.S | 6
-rw-r--r--  runtime/interpreter/mterp/arm/binopWide2addr.S | 6
-rw-r--r--  runtime/interpreter/mterp/arm/fbinopWide.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/fbinopWide2addr.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/funopWider.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/header.S | 13
-rw-r--r--  runtime/interpreter/mterp/arm/op_aget_wide.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_wide.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_wide_16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_wide_32.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_const_wide_high16.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_wide.S | 5
-rw-r--r--  runtime/interpreter/mterp/arm/op_iget_wide_quick.S | 1
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_result_wide.S | 5
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_wide.S | 5
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_wide_16.S | 5
-rw-r--r--  runtime/interpreter/mterp/arm/op_move_wide_from16.S | 5
-rw-r--r--  runtime/interpreter/mterp/arm/op_sget_wide.S | 5
-rw-r--r--  runtime/interpreter/mterp/arm/unopWide.S | 5
-rw-r--r--  runtime/interpreter/mterp/arm/unopWider.S | 5
-rw-r--r--  runtime/interpreter/mterp/config_x86 | 4
-rwxr-xr-x  runtime/interpreter/mterp/gen_mterp.py | 58
-rw-r--r--  runtime/interpreter/mterp/out/mterp_arm.S | 193
-rw-r--r--  runtime/interpreter/mterp/out/mterp_x86.S | 1558
-rw-r--r--  runtime/interpreter/mterp/x86/alt_stub.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/bincmp.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/bindiv.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/bindiv2addr.S | 8
-rw-r--r--  runtime/interpreter/mterp/x86/bindivLit16.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/bindivLit8.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/binop.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/binop1.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/binop2addr.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/binopLit16.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/binopLit8.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/binopWide.S | 16
-rw-r--r--  runtime/interpreter/mterp/x86/binopWide2addr.S | 10
-rw-r--r--  runtime/interpreter/mterp/x86/entry.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/footer.S | 22
-rw-r--r--  runtime/interpreter/mterp/x86/fpcmp.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/header.S | 30
-rw-r--r--  runtime/interpreter/mterp/x86/invoke.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_aget.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_aget_object.S | 8
-rw-r--r--  runtime/interpreter/mterp/x86/op_aget_wide.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_aput.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_aput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_aput_wide.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_array_length.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_check_cast.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_cmp_long.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_const.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_16.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_4.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_class.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_high16.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_string.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_string_jumbo.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_wide_16.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_wide_32.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_const_wide_high16.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_div_long.S | 14
-rw-r--r--  runtime/interpreter/mterp/x86/op_div_long_2addr.S | 14
-rw-r--r--  runtime/interpreter/mterp/x86/op_fill_array_data.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_filled_new_array.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_object_quick.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_quick.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_wide.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_iget_wide_quick.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_instance_of.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_int_to_long.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_object_quick.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_quick.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_iput_wide_quick.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_monitor_enter.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_monitor_exit.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_move.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_move_16.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_move_exception.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_move_from16.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_move_result.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_move_result_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_move_wide.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_move_wide_16.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_move_wide_from16.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_mul_int.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_mul_int_2addr.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_mul_int_lit16.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_mul_int_lit8.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_mul_long.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_neg_long.S | 8
-rw-r--r--  runtime/interpreter/mterp/x86/op_new_array.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_new_instance.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_not_long.S | 8
-rw-r--r--  runtime/interpreter/mterp/x86/op_packed_switch.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_return.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_return_void.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_return_void_no_barrier.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_return_wide.S | 8
-rw-r--r--  runtime/interpreter/mterp/x86/op_sget.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_sget_wide.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/op_shl_long.S | 10
-rw-r--r--  runtime/interpreter/mterp/x86/op_shl_long_2addr.S | 10
-rw-r--r--  runtime/interpreter/mterp/x86/op_shr_long.S | 10
-rw-r--r--  runtime/interpreter/mterp/x86/op_shr_long_2addr.S | 10
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput.S | 4
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput_object.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_sput_wide.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_throw.S | 2
-rw-r--r--  runtime/interpreter/mterp/x86/op_ushr_long.S | 10
-rw-r--r--  runtime/interpreter/mterp/x86/op_ushr_long_2addr.S | 10
-rw-r--r--  runtime/interpreter/mterp/x86/shop2addr.S | 6
-rw-r--r--  runtime/interpreter/mterp/x86/unop.S | 4
-rw-r--r--  runtime/jit/jit.cc | 153
-rw-r--r--  runtime/jit/jit_code_cache.cc | 2
-rw-r--r--  runtime/jit/offline_profiling_info.cc | 23
-rw-r--r--  runtime/jit/offline_profiling_info.h | 2
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 6
-rw-r--r--  runtime/oat_file_manager.cc | 17
-rw-r--r--  runtime/openjdkjvm/Android.mk | 20
-rw-r--r--  runtime/openjdkjvm/MODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION | 0
-rw-r--r--  runtime/parsed_options.cc | 5
-rw-r--r--  runtime/quick/inline_method_analyser.cc | 375
-rw-r--r--  runtime/quick/inline_method_analyser.h | 15
-rw-r--r--  runtime/runtime.cc | 2
-rw-r--r--  runtime/runtime.h | 7
-rw-r--r--  runtime/runtime_options.def | 1
-rw-r--r--  runtime/thread.cc | 10
-rw-r--r--  runtime/thread.h | 8
-rw-r--r--  runtime/thread_list.cc | 17
-rw-r--r--  runtime/thread_list.h | 2
-rw-r--r--  runtime/verifier/method_verifier.cc | 15
-rw-r--r--  test/127-checker-secondarydex/src/Test.java | 2
-rw-r--r--  test/137-cfi/cfi.cc | 1
-rw-r--r--  test/137-cfi/expected.txt | 1
-rwxr-xr-x  test/137-cfi/run | 3
-rw-r--r--  test/137-cfi/src/Main.java | 12
-rw-r--r--  test/444-checker-nce/src/Main.java | 8
-rw-r--r--  test/449-checker-bce/src/Main.java | 271
-rw-r--r--  test/450-checker-types/src/Main.java | 24
-rw-r--r--  test/458-checker-instruction-simplification/src/Main.java | 392
-rw-r--r--  test/464-checker-inline-sharpen-calls/src/Main.java | 2
-rw-r--r--  test/477-checker-bound-type/src/Main.java | 8
-rw-r--r--  test/492-checker-inline-invoke-interface/src/Main.java | 2
-rw-r--r--  test/510-checker-try-catch/smali/Builder.smali | 378
-rw-r--r--  test/510-checker-try-catch/smali/SsaBuilder.smali | 17
-rw-r--r--  test/517-checker-builder-fallthrough/smali/TestCase.smali | 4
-rw-r--r--  test/523-checker-can-throw-regression/smali/Test.smali | 6
-rw-r--r--  test/530-checker-lse/src/Main.java | 25
-rw-r--r--  test/537-checker-debuggable/smali/TestCase.smali | 4
-rw-r--r--  test/540-checker-rtp-bug/src/Main.java | 6
-rw-r--r--  test/549-checker-types-merge/src/Main.java | 22
-rw-r--r--  test/550-checker-regression-wide-store/smali/TestCase.smali | 6
-rw-r--r--  test/551-checker-shifter-operand/src/Main.java | 16
-rw-r--r--  test/552-checker-primitive-typeprop/smali/ArrayGet.smali | 28
-rw-r--r--  test/552-checker-primitive-typeprop/smali/ArraySet.smali | 2
-rw-r--r--  test/552-checker-primitive-typeprop/smali/SsaBuilder.smali | 2
-rw-r--r--  test/552-checker-primitive-typeprop/smali/TypePropagation.smali | 16
-rw-r--r--  test/554-checker-rtp-checkcast/src/Main.java | 6
-rw-r--r--  test/557-checker-ref-equivalent/smali/TestCase.smali | 2
-rw-r--r--  test/557-checker-ref-equivalent/src/Main.java | 4
-rw-r--r--  test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali | 10
-rw-r--r--  test/559-checker-rtp-ifnotnull/src/Main.java | 2
-rw-r--r--  test/569-checker-pattern-replacement/src-multidex/Base.java | 70
-rw-r--r--  test/569-checker-pattern-replacement/src-multidex/BaseWithFinalField.java | 27
-rw-r--r--  test/569-checker-pattern-replacement/src-multidex/Derived.java | 63
-rw-r--r--  test/569-checker-pattern-replacement/src-multidex/DerivedInSecondDex.java | 31
-rw-r--r--  test/569-checker-pattern-replacement/src-multidex/DerivedWithFinalField.java | 38
-rw-r--r--  test/569-checker-pattern-replacement/src/BaseInMainDex.java | 26
-rw-r--r--  test/569-checker-pattern-replacement/src/Main.java | 1410
-rw-r--r--  test/570-checker-osr/expected.txt | 8
-rw-r--r--  test/570-checker-osr/osr.cc | 81
-rw-r--r--  test/570-checker-osr/src/Main.java | 70
-rw-r--r--  test/570-checker-select/src/Main.java | 120
-rw-r--r--  test/572-checker-array-get-regression/expected.txt | 1
-rw-r--r--  test/572-checker-array-get-regression/info.txt | 3
-rw-r--r--  test/572-checker-array-get-regression/src/Main.java | 61
-rw-r--r--  test/573-checker-checkcast-regression/expected.txt | 1
-rw-r--r--  test/573-checker-checkcast-regression/info.txt | 4
-rw-r--r--  test/573-checker-checkcast-regression/src/Main.java | 49
-rw-r--r--  test/800-smali/expected.txt | 2
-rw-r--r--  test/800-smali/smali/b_26965384.smali | 20
-rw-r--r--  test/800-smali/smali/b_26965384Super.smali | 10
-rw-r--r--  test/800-smali/smali/b_27148248.smali | 27
-rw-r--r--  test/800-smali/src/Main.java | 4
-rw-r--r--  test/Android.run-test.mk | 14
-rwxr-xr-x  test/etc/run-test-jar | 4
-rwxr-xr-x  tools/buildbot-build.sh | 2
-rwxr-xr-x  tools/run-libcore-tests.sh | 12
276 files changed, 6992 insertions, 3875 deletions
diff --git a/Android.mk b/Android.mk
index 4f73127123..2e05d33209 100644
--- a/Android.mk
+++ b/Android.mk
@@ -547,3 +547,5 @@ endif # !art_dont_bother
art_dont_bother :=
art_test_bother :=
TEST_ART_TARGET_SYNC_DEPS :=
+
+include $(art_path)/runtime/openjdkjvm/Android.mk
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index fd7f949c5d..a19b36f9cc 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -82,6 +82,8 @@ struct VariableLocation {
// Get the location of given dex register (e.g. stack or machine register).
// Note that the location might be different based on the current pc.
// The result will cover all ranges where the variable is in scope.
+// PCs corresponding to stackmap with dex register map are accurate,
+// all other PCs are best-effort only.
std::vector<VariableLocation> GetVariableLocations(const MethodDebugInfo* method_info,
uint16_t vreg,
bool is64bitValue,
@@ -141,6 +143,9 @@ std::vector<VariableLocation> GetVariableLocations(const MethodDebugInfo* method
variable_locations.back().high_pc == low_pc) {
// Merge with the previous entry (extend its range).
variable_locations.back().high_pc = high_pc;
+ } else if (!variable_locations.empty() && reg_lo == DexRegisterLocation::None()) {
+ // Unknown location - use the last known location as best-effort guess.
+ variable_locations.back().high_pc = high_pc;
} else {
variable_locations.push_back({low_pc, high_pc, reg_lo, reg_hi});
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 22b178ce7d..209f101199 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -875,6 +875,7 @@ bool DexFileMethodInliner::GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* i
move_result = mir_graph->FindMoveResult(bb, invoke);
result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
break;
+ case kInlineOpConstructor:
case kInlineStringInit:
return false;
default:
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 4c03e5ddfe..478588561f 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -249,9 +249,9 @@ class CompilerDriverProfileTest : public CompilerDriverTest {
ProfileCompilationInfo info;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
- std::cout << std::string(dex_file->GetLocation());
- profile_info_.AddData(dex_file->GetLocation(), dex_file->GetLocationChecksum(), 1);
- profile_info_.AddData(dex_file->GetLocation(), dex_file->GetLocationChecksum(), 2);
+ std::string key = ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation());
+ profile_info_.AddData(key, dex_file->GetLocationChecksum(), 1);
+ profile_info_.AddData(key, dex_file->GetLocationChecksum(), 2);
}
return &profile_info_;
}
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index d2bf6c07c8..3fe786141e 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -203,6 +203,7 @@ JitCompiler::~JitCompiler() {
}
bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
+ DCHECK(!method->IsProxyMethod());
TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit));
const uint64_t start_time = NanoTime();
StackHandleScope<2> hs(self);
@@ -220,20 +221,17 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
bool success = false;
{
TimingLogger::ScopedTiming t2("Compiling", &logger);
- // If we get a request to compile a proxy method, we pass the actual Java method
- // of that proxy method, as the compiler does not expect a proxy method.
- ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
- success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method_to_compile, osr);
+ success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method, osr);
if (success && (perf_file_ != nullptr)) {
- const void* ptr = method_to_compile->GetEntryPointFromQuickCompiledCode();
+ const void* ptr = method->GetEntryPointFromQuickCompiledCode();
std::ostringstream stream;
stream << std::hex
<< reinterpret_cast<uintptr_t>(ptr)
<< " "
<< code_cache->GetMemorySizeOfCodePointer(ptr)
<< " "
- << PrettyMethod(method_to_compile)
+ << PrettyMethod(method)
<< std::endl;
std::string str = stream.str();
bool res = perf_file_->WriteFully(str.c_str(), str.size());
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index c307522f2b..ba1b1683d7 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -63,9 +63,10 @@ class ValueBound : public ValueObject {
return true;
}
+ // Return true if instruction can be expressed as "left_instruction + right_constant".
static bool IsAddOrSubAConstant(HInstruction* instruction,
- HInstruction** left_instruction,
- int* right_constant) {
+ /* out */ HInstruction** left_instruction,
+ /* out */ int32_t* right_constant) {
if (instruction->IsAdd() || instruction->IsSub()) {
HBinaryOperation* bin_op = instruction->AsBinaryOperation();
HInstruction* left = bin_op->GetLeft();
@@ -82,9 +83,22 @@ class ValueBound : public ValueObject {
return false;
}
+ // Expresses any instruction as a value bound.
+ static ValueBound AsValueBound(HInstruction* instruction) {
+ if (instruction->IsIntConstant()) {
+ return ValueBound(nullptr, instruction->AsIntConstant()->GetValue());
+ }
+ HInstruction *left;
+ int32_t right;
+ if (IsAddOrSubAConstant(instruction, &left, &right)) {
+ return ValueBound(left, right);
+ }
+ return ValueBound(instruction, 0);
+ }
+
// Try to detect useful value bound format from an instruction, e.g.
// a constant or array length related value.
- static ValueBound DetectValueBoundFromValue(HInstruction* instruction, bool* found) {
+ static ValueBound DetectValueBoundFromValue(HInstruction* instruction, /* out */ bool* found) {
DCHECK(instruction != nullptr);
if (instruction->IsIntConstant()) {
*found = true;
@@ -227,7 +241,7 @@ class ValueBound : public ValueObject {
// Add a constant to a ValueBound.
// `overflow` or `underflow` will return whether the resulting bound may
// overflow or underflow an int.
- ValueBound Add(int32_t c, bool* overflow, bool* underflow) const {
+ ValueBound Add(int32_t c, /* out */ bool* overflow, /* out */ bool* underflow) const {
*overflow = *underflow = false;
if (c == 0) {
return *this;
@@ -488,10 +502,10 @@ class BCEVisitor : public HGraphVisitor {
// the deoptimization technique.
static constexpr size_t kThresholdForAddingDeoptimize = 2;
- // Very large constant index is considered as an anomaly. This is a threshold
- // beyond which we don't bother to apply the deoptimization technique since
- // it's likely some AIOOBE will be thrown.
- static constexpr int32_t kMaxConstantForAddingDeoptimize =
+ // Very large lengths are considered an anomaly. This is a threshold beyond which we don't
+ // bother to apply the deoptimization technique since it's likely, or sometimes certain,
+ // an AIOOBE will be thrown.
+ static constexpr uint32_t kMaxLengthForAddingDeoptimize =
std::numeric_limits<int32_t>::max() - 1024 * 1024;
// Added blocks for loop body entry test.
@@ -508,7 +522,7 @@ class BCEVisitor : public HGraphVisitor {
std::less<int>(),
graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
- first_constant_index_bounds_check_map_(
+ first_index_bounds_check_map_(
std::less<int>(),
graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
early_exit_loop_(
@@ -518,23 +532,16 @@ class BCEVisitor : public HGraphVisitor {
std::less<uint32_t>(),
graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
finite_loop_(graph->GetArena()->Adapter(kArenaAllocBoundsCheckElimination)),
- need_to_revisit_block_(false),
- has_deoptimization_on_constant_subscripts_(false),
+ has_dom_based_dynamic_bce_(false),
initial_block_size_(graph->GetBlocks().size()),
side_effects_(side_effects),
induction_range_(induction_analysis) {}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
DCHECK(!IsAddedBlock(block));
- first_constant_index_bounds_check_map_.clear();
+ first_index_bounds_check_map_.clear();
HGraphVisitor::VisitBasicBlock(block);
- if (need_to_revisit_block_) {
- AddComparesWithDeoptimization(block);
- need_to_revisit_block_ = false;
- first_constant_index_bounds_check_map_.clear();
- GetValueRangeMap(block)->clear();
- HGraphVisitor::VisitBasicBlock(block);
- }
+ AddComparesWithDeoptimization(block);
}
void Finish() {
@@ -555,8 +562,7 @@ class BCEVisitor : public HGraphVisitor {
// Added blocks don't keep value ranges.
return nullptr;
}
- uint32_t block_id = basic_block->GetBlockId();
- return &maps_[block_id];
+ return &maps_[basic_block->GetBlockId()];
}
// Traverse up the dominator tree to look for value range info.
@@ -576,6 +582,11 @@ class BCEVisitor : public HGraphVisitor {
return nullptr;
}
+ // Helper method to assign a new range to an instruction in given basic block.
+ void AssignRange(HBasicBlock* basic_block, HInstruction* instruction, ValueRange* range) {
+ GetValueRangeMap(basic_block)->Overwrite(instruction->GetId(), range);
+ }
+
// Narrow the value range of `instruction` at the end of `basic_block` with `range`,
// and push the narrowed value range to `successor`.
void ApplyRangeFromComparison(HInstruction* instruction, HBasicBlock* basic_block,
@@ -583,7 +594,7 @@ class BCEVisitor : public HGraphVisitor {
ValueRange* existing_range = LookupValueRange(instruction, basic_block);
if (existing_range == nullptr) {
if (range != nullptr) {
- GetValueRangeMap(successor)->Overwrite(instruction->GetId(), range);
+ AssignRange(successor, instruction, range);
}
return;
}
@@ -595,8 +606,7 @@ class BCEVisitor : public HGraphVisitor {
return;
}
}
- ValueRange* narrowed_range = existing_range->Narrow(range);
- GetValueRangeMap(successor)->Overwrite(instruction->GetId(), narrowed_range);
+ AssignRange(successor, instruction, existing_range->Narrow(range));
}
// Special case that we may simultaneously narrow two MonotonicValueRange's to
@@ -778,37 +788,37 @@ class BCEVisitor : public HGraphVisitor {
array_length->IsPhi());
bool try_dynamic_bce = true;
+ // Analyze index range.
if (!index->IsIntConstant()) {
- // Non-constant subscript.
+ // Non-constant index.
ValueBound lower = ValueBound(nullptr, 0); // constant 0
ValueBound upper = ValueBound(array_length, -1); // array_length - 1
ValueRange array_range(GetGraph()->GetArena(), lower, upper);
- // Try range obtained by dominator-based analysis.
+ // Try index range obtained by dominator-based analysis.
ValueRange* index_range = LookupValueRange(index, block);
if (index_range != nullptr && index_range->FitsIn(&array_range)) {
ReplaceInstruction(bounds_check, index);
return;
}
- // Try range obtained by induction variable analysis.
+ // Try index range obtained by induction variable analysis.
// Disables dynamic bce if OOB is certain.
if (InductionRangeFitsIn(&array_range, bounds_check, index, &try_dynamic_bce)) {
ReplaceInstruction(bounds_check, index);
return;
}
} else {
- // Constant subscript.
+ // Constant index.
int32_t constant = index->AsIntConstant()->GetValue();
if (constant < 0) {
// Will always throw exception.
return;
- }
- if (array_length->IsIntConstant()) {
+ } else if (array_length->IsIntConstant()) {
if (constant < array_length->AsIntConstant()->GetValue()) {
ReplaceInstruction(bounds_check, index);
}
return;
}
-
+ // Analyze array length range.
DCHECK(array_length->IsArrayLength());
ValueRange* existing_range = LookupValueRange(array_length, block);
if (existing_range != nullptr) {
@@ -823,37 +833,35 @@ class BCEVisitor : public HGraphVisitor {
// bounds check.
}
}
-
- if (first_constant_index_bounds_check_map_.find(array_length->GetId()) ==
- first_constant_index_bounds_check_map_.end()) {
- // Remember the first bounds check against array_length of a constant index.
- // That bounds check instruction has an associated HEnvironment where we
- // may add an HDeoptimize to eliminate bounds checks of constant indices
- // against array_length.
- first_constant_index_bounds_check_map_.Put(array_length->GetId(), bounds_check);
- } else {
- // We've seen it at least twice. It's beneficial to introduce a compare with
- // deoptimization fallback to eliminate the bounds checks.
- need_to_revisit_block_ = true;
- }
-
// Once we have an array access like 'array[5] = 1', we record array.length >= 6.
// We currently don't do it for non-constant index since a valid array[i] can't prove
// a valid array[i-1] yet due to the lower bound side.
if (constant == std::numeric_limits<int32_t>::max()) {
// Max() as an index will definitely throw AIOOBE.
return;
+ } else {
+ ValueBound lower = ValueBound(nullptr, constant + 1);
+ ValueBound upper = ValueBound::Max();
+ ValueRange* range = new (GetGraph()->GetArena())
+ ValueRange(GetGraph()->GetArena(), lower, upper);
+ AssignRange(block, array_length, range);
}
- ValueBound lower = ValueBound(nullptr, constant + 1);
- ValueBound upper = ValueBound::Max();
- ValueRange* range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
- GetValueRangeMap(block)->Overwrite(array_length->GetId(), range);
}
// If static analysis fails, and OOB is not certain, try dynamic elimination.
if (try_dynamic_bce) {
- TryDynamicBCE(bounds_check);
+ // Try loop-based dynamic elimination.
+ if (TryDynamicBCE(bounds_check)) {
+ return;
+ }
+ // Prepare dominator-based dynamic elimination.
+ if (first_index_bounds_check_map_.find(array_length->GetId()) ==
+ first_index_bounds_check_map_.end()) {
+ // Remember the first bounds check against each array_length. That bounds check
+ // instruction has an associated HEnvironment where we may add an HDeoptimize
+ // to eliminate subsequent bounds checks against the same array_length.
+ first_index_bounds_check_map_.Put(array_length->GetId(), bounds_check);
+ }
}
}
@@ -914,7 +922,7 @@ class BCEVisitor : public HGraphVisitor {
increment,
bound);
}
- GetValueRangeMap(phi->GetBlock())->Overwrite(phi->GetId(), range);
+ AssignRange(phi->GetBlock(), phi, range);
}
}
}
@@ -942,7 +950,7 @@ class BCEVisitor : public HGraphVisitor {
}
ValueRange* range = left_range->Add(right->AsIntConstant()->GetValue());
if (range != nullptr) {
- GetValueRangeMap(add->GetBlock())->Overwrite(add->GetId(), range);
+ AssignRange(add->GetBlock(), add, range);
}
}
}
@@ -957,7 +965,7 @@ class BCEVisitor : public HGraphVisitor {
}
ValueRange* range = left_range->Add(-right->AsIntConstant()->GetValue());
if (range != nullptr) {
- GetValueRangeMap(sub->GetBlock())->Overwrite(sub->GetId(), range);
+ AssignRange(sub->GetBlock(), sub, range);
return;
}
}
@@ -997,7 +1005,7 @@ class BCEVisitor : public HGraphVisitor {
GetGraph()->GetArena(),
ValueBound(nullptr, right_const - upper.GetConstant()),
ValueBound(array_length, right_const - lower.GetConstant()));
- GetValueRangeMap(sub->GetBlock())->Overwrite(sub->GetId(), range);
+ AssignRange(sub->GetBlock(), sub, range);
}
}
}
@@ -1045,7 +1053,7 @@ class BCEVisitor : public HGraphVisitor {
GetGraph()->GetArena(),
ValueBound(nullptr, std::numeric_limits<int32_t>::min()),
ValueBound(left, 0));
- GetValueRangeMap(instruction->GetBlock())->Overwrite(instruction->GetId(), range);
+ AssignRange(instruction->GetBlock(), instruction, range);
}
}
@@ -1071,7 +1079,7 @@ class BCEVisitor : public HGraphVisitor {
GetGraph()->GetArena(),
ValueBound(nullptr, 0),
ValueBound(nullptr, constant));
- GetValueRangeMap(instruction->GetBlock())->Overwrite(instruction->GetId(), range);
+ AssignRange(instruction->GetBlock(), instruction, range);
}
}
}
@@ -1095,30 +1103,11 @@ class BCEVisitor : public HGraphVisitor {
if (existing_range != nullptr) {
range = existing_range->Narrow(range);
}
- GetValueRangeMap(new_array->GetBlock())->Overwrite(left->GetId(), range);
+ AssignRange(new_array->GetBlock(), left, range);
}
}
}
- void VisitDeoptimize(HDeoptimize* deoptimize) OVERRIDE {
- if (!deoptimize->InputAt(0)->IsLessThanOrEqual()) {
- return;
- }
- // If this instruction was added by AddCompareWithDeoptimization(), narrow
- // the range accordingly in subsequent basic blocks.
- HLessThanOrEqual* less_than_or_equal = deoptimize->InputAt(0)->AsLessThanOrEqual();
- HInstruction* instruction = less_than_or_equal->InputAt(0);
- if (instruction->IsArrayLength()) {
- HInstruction* constant = less_than_or_equal->InputAt(1);
- DCHECK(constant->IsIntConstant());
- DCHECK(constant->AsIntConstant()->GetValue() <= kMaxConstantForAddingDeoptimize);
- ValueBound lower = ValueBound(nullptr, constant->AsIntConstant()->GetValue() + 1);
- ValueRange* range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, ValueBound::Max());
- GetValueRangeMap(deoptimize->GetBlock())->Overwrite(instruction->GetId(), range);
- }
- }
-
/**
* After null/bounds checks are eliminated, some invariant array references
* may be exposed underneath which can be hoisted out of the loop to the
@@ -1130,13 +1119,12 @@ class BCEVisitor : public HGraphVisitor {
* a[i][j] = 0; --a[i]--+
* }
*
- * Note: this optimization is no longer applied after deoptimization on array references
- * with constant subscripts has occurred (see AddCompareWithDeoptimization()), since in
- * those cases it would be unsafe to hoist array references across their deoptimization
- * instruction inside a loop.
+ * Note: this optimization is no longer applied after dominator-based dynamic deoptimization
+ * has occurred (see AddCompareWithDeoptimization()), since in those cases it would be
+ * unsafe to hoist array references across their deoptimization instruction inside a loop.
*/
void VisitArrayGet(HArrayGet* array_get) OVERRIDE {
- if (!has_deoptimization_on_constant_subscripts_ && array_get->IsInLoop()) {
+ if (!has_dom_based_dynamic_bce_ && array_get->IsInLoop()) {
HLoopInformation* loop = array_get->GetBlock()->GetLoopInformation();
if (loop->IsDefinedOutOfTheLoop(array_get->InputAt(0)) &&
loop->IsDefinedOutOfTheLoop(array_get->InputAt(1))) {
@@ -1148,69 +1136,105 @@ class BCEVisitor : public HGraphVisitor {
}
}
- void AddCompareWithDeoptimization(HInstruction* array_length,
- HIntConstant* const_instr,
- HBasicBlock* block) {
- DCHECK(array_length->IsArrayLength());
- ValueRange* range = LookupValueRange(array_length, block);
- ValueBound lower_bound = range->GetLower();
- DCHECK(lower_bound.IsConstant());
- DCHECK(const_instr->GetValue() <= kMaxConstantForAddingDeoptimize);
- // Note that the lower bound of the array length may have been refined
- // through other instructions (such as `HNewArray(length - 4)`).
- DCHECK_LE(const_instr->GetValue() + 1, lower_bound.GetConstant());
-
- // If array_length is less than lower_const, deoptimize.
- HBoundsCheck* bounds_check = first_constant_index_bounds_check_map_.Get(
- array_length->GetId())->AsBoundsCheck();
- HCondition* cond = new (GetGraph()->GetArena()) HLessThanOrEqual(array_length, const_instr);
- HDeoptimize* deoptimize = new (GetGraph()->GetArena())
- HDeoptimize(cond, bounds_check->GetDexPc());
- block->InsertInstructionBefore(cond, bounds_check);
- block->InsertInstructionBefore(deoptimize, bounds_check);
- deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
- // Flag that this kind of deoptimization on array references with constant
- // subscripts has occurred to prevent further hoisting of these references.
- has_deoptimization_on_constant_subscripts_ = true;
+ // Perform dominator-based dynamic elimination on suitable set of bounds checks.
+ void AddCompareWithDeoptimization(HBasicBlock* block,
+ HInstruction* array_length,
+ HInstruction* base,
+ int32_t min_c, int32_t max_c) {
+ HBoundsCheck* bounds_check =
+ first_index_bounds_check_map_.Get(array_length->GetId())->AsBoundsCheck();
+ // Construct deoptimization on single or double bounds on range [base-min_c,base+max_c],
+ // for example either for a[0]..a[3] just 3 or for a[base-1]..a[base+3] both base-1
+ // and base+3, since we made the assumption any in between value may occur too.
+ static_assert(kMaxLengthForAddingDeoptimize < std::numeric_limits<int32_t>::max(),
+ "Incorrect max length may be subject to arithmetic wrap-around");
+ HInstruction* upper = GetGraph()->GetIntConstant(max_c);
+ if (base == nullptr) {
+ DCHECK_GE(min_c, 0);
+ } else {
+ HInstruction* lower = new (GetGraph()->GetArena())
+ HAdd(Primitive::kPrimInt, base, GetGraph()->GetIntConstant(min_c));
+ upper = new (GetGraph()->GetArena()) HAdd(Primitive::kPrimInt, base, upper);
+ block->InsertInstructionBefore(lower, bounds_check);
+ block->InsertInstructionBefore(upper, bounds_check);
+ InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAbove(lower, upper));
+ }
+ InsertDeoptInBlock(bounds_check, new (GetGraph()->GetArena()) HAboveOrEqual(upper, array_length));
+ // Flag that this kind of deoptimization has occurred.
+ has_dom_based_dynamic_bce_ = true;
}
+ // Attempt dominator-based dynamic elimination on remaining candidates.
void AddComparesWithDeoptimization(HBasicBlock* block) {
- for (ArenaSafeMap<int, HBoundsCheck*>::iterator it =
- first_constant_index_bounds_check_map_.begin();
- it != first_constant_index_bounds_check_map_.end();
- ++it) {
- HBoundsCheck* bounds_check = it->second;
+ for (const auto& entry : first_index_bounds_check_map_) {
+ HBoundsCheck* bounds_check = entry.second;
+ HInstruction* index = bounds_check->InputAt(0);
HInstruction* array_length = bounds_check->InputAt(1);
if (!array_length->IsArrayLength()) {
- // Prior deoptimizations may have changed the array length to a phi.
- // TODO(mingyao): propagate the range to the phi?
- DCHECK(array_length->IsPhi()) << array_length->DebugName();
- continue;
+ continue; // disregard phis and constants
}
- HIntConstant* lower_bound_const_instr = nullptr;
- int32_t lower_bound_const = std::numeric_limits<int32_t>::min();
- size_t counter = 0;
- // Count the constant indexing for which bounds checks haven't
- // been removed yet.
- for (HUseIterator<HInstruction*> it2(array_length->GetUses());
- !it2.Done();
- it2.Advance()) {
+ // Collect all bounds checks are still there and that are related as "a[base + constant]"
+ // for a base instruction (possibly absent) and various constants. Note that no attempt
+ // is made to partition the set into matching subsets (viz. a[0], a[1] and a[base+1] and
+ // a[base+2] are considered as one set).
+ // TODO: would such a partitioning be worthwhile?
+ ValueBound value = ValueBound::AsValueBound(index);
+ HInstruction* base = value.GetInstruction();
+ int32_t min_c = base == nullptr ? 0 : value.GetConstant();
+ int32_t max_c = value.GetConstant();
+ ArenaVector<HBoundsCheck*> candidates(
+ GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ ArenaVector<HBoundsCheck*> standby(
+ GetGraph()->GetArena()->Adapter(kArenaAllocBoundsCheckElimination));
+ for (HUseIterator<HInstruction*> it2(array_length->GetUses()); !it2.Done(); it2.Advance()) {
+ // Another bounds check in same or dominated block?
HInstruction* user = it2.Current()->GetUser();
- if (user->GetBlock() == block &&
- user->IsBoundsCheck() &&
- user->AsBoundsCheck()->InputAt(0)->IsIntConstant()) {
- DCHECK_EQ(array_length, user->AsBoundsCheck()->InputAt(1));
- HIntConstant* const_instr = user->AsBoundsCheck()->InputAt(0)->AsIntConstant();
- if (const_instr->GetValue() > lower_bound_const) {
- lower_bound_const = const_instr->GetValue();
- lower_bound_const_instr = const_instr;
+ HBasicBlock* other_block = user->GetBlock();
+ if (user->IsBoundsCheck() && block->Dominates(other_block)) {
+ HBoundsCheck* other_bounds_check = user->AsBoundsCheck();
+ HInstruction* other_index = other_bounds_check->InputAt(0);
+ HInstruction* other_array_length = other_bounds_check->InputAt(1);
+ ValueBound other_value = ValueBound::AsValueBound(other_index);
+ if (array_length == other_array_length && base == other_value.GetInstruction()) {
+ int32_t other_c = other_value.GetConstant();
+ // Since a subsequent dominated block could be under a conditional, only accept
+ // the other bounds check if it is in same block or both blocks dominate the exit.
+ // TODO: we could improve this by testing proper post-dominance, or even if this
+ // constant is seen along *all* conditional paths that follow.
+ HBasicBlock* exit = GetGraph()->GetExitBlock();
+ if (block == user->GetBlock() ||
+ (block->Dominates(exit) && other_block->Dominates(exit))) {
+ min_c = std::min(min_c, other_c);
+ max_c = std::max(max_c, other_c);
+ candidates.push_back(other_bounds_check);
+ } else {
+ // Add this candidate later only if it falls into the range.
+ standby.push_back(other_bounds_check);
+ }
}
- counter++;
}
}
- if (counter >= kThresholdForAddingDeoptimize &&
- lower_bound_const_instr->GetValue() <= kMaxConstantForAddingDeoptimize) {
- AddCompareWithDeoptimization(array_length, lower_bound_const_instr, block);
+ // Add standby candidates that fall in selected range.
+ for (HBoundsCheck* other_bounds_check : standby) {
+ HInstruction* other_index = other_bounds_check->InputAt(0);
+ int32_t other_c = ValueBound::AsValueBound(other_index).GetConstant();
+ if (min_c <= other_c && other_c <= max_c) {
+ candidates.push_back(other_bounds_check);
+ }
+ }
+ // Perform dominator-based deoptimization if it seems profitable. Note that we reject cases
+ // where the distance min_c:max_c range gets close to the maximum possible array length,
+ // since those cases are likely to always deopt (such situations do not necessarily go
+ // OOB, though, since the programmer could rely on wrap-around from max to min).
+ size_t threshold = kThresholdForAddingDeoptimize + (base == nullptr ? 0 : 1); // extra test?
+ uint32_t distance = static_cast<uint32_t>(max_c) - static_cast<uint32_t>(min_c);
+ if (candidates.size() >= threshold &&
+ (base != nullptr || min_c >= 0) && // reject certain OOB
+ distance <= kMaxLengthForAddingDeoptimize) { // reject likely/certain deopt
+ AddCompareWithDeoptimization(block, array_length, base, min_c, max_c);
+ for (HInstruction* other_bounds_check : candidates) {
+ ReplaceInstruction(other_bounds_check, other_bounds_check->InputAt(0));
+ }
}
}
}
@@ -1259,7 +1283,7 @@ class BCEVisitor : public HGraphVisitor {
* deoptimization). If no deoptimization occurs, the loop is executed with all corresponding
* bounds checks and related null checks removed.
*/
- void TryDynamicBCE(HBoundsCheck* instruction) {
+ bool TryDynamicBCE(HBoundsCheck* instruction) {
HLoopInformation* loop = instruction->GetBlock()->GetLoopInformation();
HInstruction* index = instruction->InputAt(0);
HInstruction* length = instruction->InputAt(1);
@@ -1285,11 +1309,13 @@ class BCEVisitor : public HGraphVisitor {
HBasicBlock* block = GetPreHeader(loop, instruction);
induction_range_.GenerateRangeCode(instruction, index, GetGraph(), block, &lower, &upper);
if (lower != nullptr) {
- InsertDeopt(loop, block, new (GetGraph()->GetArena()) HAbove(lower, upper));
+ InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAbove(lower, upper));
}
- InsertDeopt(loop, block, new (GetGraph()->GetArena()) HAboveOrEqual(upper, length));
+ InsertDeoptInLoop(loop, block, new (GetGraph()->GetArena()) HAboveOrEqual(upper, length));
ReplaceInstruction(instruction, index);
+ return true;
}
+ return false;
}
/**
@@ -1382,7 +1408,7 @@ class BCEVisitor : public HGraphVisitor {
HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
- InsertDeopt(loop, block, cond);
+ InsertDeoptInLoop(loop, block, cond);
ReplaceInstruction(check, array);
return true;
}
@@ -1448,8 +1474,8 @@ class BCEVisitor : public HGraphVisitor {
return loop->GetPreHeader();
}
- /** Inserts a deoptimization test. */
- void InsertDeopt(HLoopInformation* loop, HBasicBlock* block, HInstruction* condition) {
+ /** Inserts a deoptimization test in a loop preheader. */
+ void InsertDeoptInLoop(HLoopInformation* loop, HBasicBlock* block, HInstruction* condition) {
HInstruction* suspend = loop->GetSuspendCheck();
block->InsertInstructionBefore(condition, block->GetLastInstruction());
HDeoptimize* deoptimize =
@@ -1461,6 +1487,16 @@ class BCEVisitor : public HGraphVisitor {
}
}
+ /** Inserts a deoptimization test right before a bounds check. */
+ void InsertDeoptInBlock(HBoundsCheck* bounds_check, HInstruction* condition) {
+ HBasicBlock* block = bounds_check->GetBlock();
+ block->InsertInstructionBefore(condition, bounds_check);
+ HDeoptimize* deoptimize =
+ new (GetGraph()->GetArena()) HDeoptimize(condition, bounds_check->GetDexPc());
+ block->InsertInstructionBefore(deoptimize, bounds_check);
+ deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
+ }
+
/** Hoists instruction out of the loop to preheader or deoptimization block. */
void HoistToPreHeaderOrDeoptBlock(HLoopInformation* loop, HInstruction* instruction) {
HBasicBlock* block = GetPreHeader(loop, instruction);
@@ -1628,9 +1664,9 @@ class BCEVisitor : public HGraphVisitor {
// A set of maps, one per basic block, from instruction to range.
ArenaVector<ArenaSafeMap<int, ValueRange*>> maps_;
- // Map an HArrayLength instruction's id to the first HBoundsCheck instruction in
- // a block that checks a constant index against that HArrayLength.
- ArenaSafeMap<int, HBoundsCheck*> first_constant_index_bounds_check_map_;
+ // Map an HArrayLength instruction's id to the first HBoundsCheck instruction
+ // in a block that checks an index against that HArrayLength.
+ ArenaSafeMap<int, HBoundsCheck*> first_index_bounds_check_map_;
// Early-exit loop bookkeeping.
ArenaSafeMap<uint32_t, bool> early_exit_loop_;
@@ -1641,15 +1677,8 @@ class BCEVisitor : public HGraphVisitor {
// Finite loop bookkeeping.
ArenaSet<uint32_t> finite_loop_;
- // For the block, there is at least one HArrayLength instruction for which there
- // is more than one bounds check instruction with constant indexing. And it's
- // beneficial to add a compare instruction that has deoptimization fallback and
- // eliminate those bounds checks.
- bool need_to_revisit_block_;
-
- // Flag that denotes whether deoptimization has occurred on array references
- // with constant subscripts (see AddCompareWithDeoptimization()).
- bool has_deoptimization_on_constant_subscripts_;
+ // Flag that denotes whether dominator-based dynamic elimination has occurred.
+ bool has_dom_based_dynamic_bce_;
// Initial number of blocks.
uint32_t initial_block_size_;
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8d77daf183..05e1356ed8 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -32,46 +32,12 @@
#include "nodes.h"
#include "primitive.h"
#include "scoped_thread_state_change.h"
+#include "ssa_builder.h"
#include "thread.h"
#include "utils/dex_cache_arrays_layout-inl.h"
namespace art {
-/**
- * Helper class to add HTemporary instructions. This class is used when
- * converting a DEX instruction to multiple HInstruction, and where those
- * instructions do not die at the following instruction, but instead spans
- * multiple instructions.
- */
-class Temporaries : public ValueObject {
- public:
- explicit Temporaries(HGraph* graph) : graph_(graph), index_(0) {}
-
- void Add(HInstruction* instruction) {
- HInstruction* temp = new (graph_->GetArena()) HTemporary(index_, instruction->GetDexPc());
- instruction->GetBlock()->AddInstruction(temp);
-
- DCHECK(temp->GetPrevious() == instruction);
-
- size_t offset;
- if (instruction->GetType() == Primitive::kPrimLong
- || instruction->GetType() == Primitive::kPrimDouble) {
- offset = 2;
- } else {
- offset = 1;
- }
- index_ += offset;
-
- graph_->UpdateTemporariesVRegSlots(index_);
- }
-
- private:
- HGraph* const graph_;
-
- // Current index in the temporary stack, updated by `Add`.
- size_t index_;
-};
-
void HGraphBuilder::InitializeLocals(uint16_t count) {
graph_->SetNumberOfVRegs(count);
locals_.resize(count);
@@ -283,7 +249,7 @@ void HGraphBuilder::InsertTryBoundaryBlocks(const DexFile::CodeItem& code_item)
// loop for synchronized blocks.
if (block->HasThrowingInstructions()) {
// Try to find a TryItem covering the block.
- DCHECK_NE(block->GetDexPc(), kNoDexPc) << "Block must have a dec_pc to find its TryItem.";
+ DCHECK_NE(block->GetDexPc(), kNoDexPc) << "Block must have a dex_pc to find its TryItem.";
const int32_t try_item_idx = DexFile::FindTryItem(code_item, block->GetDexPc());
if (try_item_idx != -1) {
// Block throwing and in a TryItem. Store the try block information.
@@ -357,7 +323,8 @@ void HGraphBuilder::InsertTryBoundaryBlocks(const DexFile::CodeItem& code_item)
}
}
-bool HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
+GraphAnalysisResult HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item,
+ StackHandleScopeCollection* handles) {
DCHECK(graph_->GetBlocks().empty());
const uint16_t* code_ptr = code_item.insns_;
@@ -384,12 +351,12 @@ bool HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
// start a new block, and create these blocks.
if (!ComputeBranchTargets(code_ptr, code_end, &number_of_branches)) {
MaybeRecordStat(MethodCompilationStat::kNotCompiledBranchOutsideMethodCode);
- return false;
+ return kAnalysisInvalidBytecode;
}
// Note that the compiler driver is null when unit testing.
if ((compiler_driver_ != nullptr) && SkipCompilation(code_item, number_of_branches)) {
- return false;
+ return kAnalysisInvalidBytecode;
}
// Find locations where we want to generate extra stackmaps for native debugging.
@@ -420,7 +387,7 @@ bool HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
}
}
if (!AnalyzeDexInstruction(instruction, dex_pc)) {
- return false;
+ return kAnalysisInvalidBytecode;
}
dex_pc += instruction.SizeInCodeUnits();
code_ptr += instruction.SizeInCodeUnits();
@@ -439,7 +406,13 @@ bool HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
// non-exceptional edges to have been created.
InsertTryBoundaryBlocks(code_item);
- return true;
+ GraphAnalysisResult result = graph_->BuildDominatorTree();
+ if (result != kAnalysisSuccess) {
+ return result;
+ }
+
+ graph_->InitializeInexactObjectRTI(handles);
+ return SsaBuilder(graph_, handles).BuildSsa();
}
void HGraphBuilder::MaybeUpdateCurrentBlock(size_t dex_pc) {
@@ -1166,12 +1139,10 @@ bool HGraphBuilder::HandleInvoke(HInvoke* invoke,
size_t start_index = 0;
size_t argument_index = 0;
if (invoke->GetOriginalInvokeType() != InvokeType::kStatic) { // Instance call.
- Temporaries temps(graph_);
HInstruction* arg = LoadLocal(
is_range ? register_index : args[0], Primitive::kPrimNot, invoke->GetDexPc());
HNullCheck* null_check = new (arena_) HNullCheck(arg, invoke->GetDexPc());
current_block_->AddInstruction(null_check);
- temps.Add(null_check);
invoke->SetArgumentAt(0, null_check);
start_index = 1;
argument_index = 1;
@@ -1269,9 +1240,6 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
? GetFieldAccessType(*dex_file_, field_index)
: resolved_field->GetTypeAsPrimitiveType();
if (is_put) {
- Temporaries temps(graph_);
- // We need one temporary for the null check.
- temps.Add(null_check);
HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
HInstruction* field_set = nullptr;
if (resolved_field == nullptr) {
@@ -1456,8 +1424,6 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
uint16_t class_def_index = klass->GetDexClassDefIndex();
if (is_put) {
// We need to keep the class alive before loading the value.
- Temporaries temps(graph_);
- temps.Add(cls);
HInstruction* value = LoadLocal(source_or_dest_reg, field_type, dex_pc);
DCHECK_EQ(value->GetType(), field_type);
current_block_->AddInstruction(new (arena_) HStaticFieldSet(cls,
@@ -1510,9 +1476,7 @@ void HGraphBuilder::BuildCheckedDivRem(uint16_t out_vreg,
|| (type == Primitive::kPrimInt && second->AsIntConstant()->GetValue() == 0)
|| (type == Primitive::kPrimLong && second->AsLongConstant()->GetValue() == 0)) {
second = new (arena_) HDivZeroCheck(second, dex_pc);
- Temporaries temps(graph_);
current_block_->AddInstruction(second);
- temps.Add(current_block_->GetLastInstruction());
}
if (isDiv) {
@@ -1531,21 +1495,15 @@ void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
uint8_t array_reg = instruction.VRegB_23x();
uint8_t index_reg = instruction.VRegC_23x();
- // We need one temporary for the null check, one for the index, and one for the length.
- Temporaries temps(graph_);
-
HInstruction* object = LoadLocal(array_reg, Primitive::kPrimNot, dex_pc);
object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
- temps.Add(object);
HInstruction* length = new (arena_) HArrayLength(object, dex_pc);
current_block_->AddInstruction(length);
- temps.Add(length);
HInstruction* index = LoadLocal(index_reg, Primitive::kPrimInt, dex_pc);
index = new (arena_) HBoundsCheck(index, length, dex_pc);
current_block_->AddInstruction(index);
- temps.Add(index);
if (is_put) {
HInstruction* value = LoadLocal(source_or_dest_reg, anticipated_type, dex_pc);
// TODO: Insert a type check node if the type is Object.
@@ -1586,8 +1544,6 @@ void HGraphBuilder::BuildFilledNewArray(uint32_t dex_pc,
bool is_reference_array = (primitive == 'L') || (primitive == '[');
Primitive::Type type = is_reference_array ? Primitive::kPrimNot : Primitive::kPrimInt;
- Temporaries temps(graph_);
- temps.Add(object);
for (size_t i = 0; i < number_of_vreg_arguments; ++i) {
HInstruction* value = LoadLocal(is_range ? register_index + i : args[i], type, dex_pc);
HInstruction* index = graph_->GetIntConstant(i, dex_pc);
@@ -1612,11 +1568,9 @@ void HGraphBuilder::BuildFillArrayData(HInstruction* object,
}
void HGraphBuilder::BuildFillArrayData(const Instruction& instruction, uint32_t dex_pc) {
- Temporaries temps(graph_);
HInstruction* array = LoadLocal(instruction.VRegA_31t(), Primitive::kPrimNot, dex_pc);
HNullCheck* null_check = new (arena_) HNullCheck(array, dex_pc);
current_block_->AddInstruction(null_check);
- temps.Add(null_check);
HInstruction* length = new (arena_) HArrayLength(null_check, dex_pc);
current_block_->AddInstruction(length);
@@ -1733,10 +1687,6 @@ void HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
compiler_driver_->CanAssumeTypeIsPresentInDexCache(dex_file, type_index));
current_block_->AddInstruction(cls);
- // The class needs a temporary before being used by the type check.
- Temporaries temps(graph_);
- temps.Add(cls);
-
TypeCheckKind check_kind = ComputeTypeCheckKind(resolved_class);
if (instruction.Opcode() == Instruction::INSTANCE_OF) {
current_block_->AddInstruction(new (arena_) HInstanceOf(object, cls, check_kind, dex_pc));
@@ -2815,8 +2765,6 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
case Instruction::ARRAY_LENGTH: {
HInstruction* object = LoadLocal(instruction.VRegB_12x(), Primitive::kPrimNot, dex_pc);
- // No need for a temporary for the null check, it is the only input of the following
- // instruction.
object = new (arena_) HNullCheck(object, dex_pc);
current_block_->AddInstruction(object);
current_block_->AddInstruction(new (arena_) HArrayLength(object, dex_pc));
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 93e17d6422..e3dd0e8216 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -80,7 +80,8 @@ class HGraphBuilder : public ValueObject {
null_dex_cache_(),
dex_cache_(null_dex_cache_) {}
- bool BuildGraph(const DexFile::CodeItem& code);
+ GraphAnalysisResult BuildGraph(const DexFile::CodeItem& code,
+ StackHandleScopeCollection* handles);
static constexpr const char* kBuilderPassName = "builder";
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e1b83f05d6..c2c8ccfc56 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -287,19 +287,6 @@ void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
}
}
-Location CodeGenerator::GetTemporaryLocation(HTemporary* temp) const {
- uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
- // The type of the previous instruction tells us if we need a single or double stack slot.
- Primitive::Type type = temp->GetType();
- int32_t temp_size = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble) ? 2 : 1;
- // Use the temporary region (right below the dex registers).
- int32_t slot = GetFrameSize() - FrameEntrySpillSize()
- - kVRegSize // filler
- - (number_of_locals * kVRegSize)
- - ((temp_size + temp->GetIndex()) * kVRegSize);
- return temp_size == 2 ? Location::DoubleStackSlot(slot) : Location::StackSlot(slot);
-}
-
int32_t CodeGenerator::GetStackSlot(HLocal* local) const {
uint16_t reg_number = local->GetRegNumber();
uint16_t number_of_locals = GetGraph()->GetNumberOfLocalVRegs();
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 0a688cf649..49c193e7bf 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -187,7 +187,6 @@ class CodeGenerator {
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
virtual void Bind(HBasicBlock* block) = 0;
- virtual void Move(HInstruction* instruction, Location location, HInstruction* move_for) = 0;
virtual void MoveConstant(Location destination, int32_t value) = 0;
virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0;
virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;
@@ -203,7 +202,6 @@ class CodeGenerator {
size_t number_of_out_slots,
const ArenaVector<HBasicBlock*>& block_order);
int32_t GetStackSlot(HLocal* local) const;
- Location GetTemporaryLocation(HTemporary* temp) const;
uint32_t GetFrameSize() const { return frame_size_; }
void SetFrameSize(uint32_t size) { frame_size_ = size; }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index e43493280a..87f52c6f21 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1195,90 +1195,6 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
}
}
-void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
- LocationSummary* locations = instruction->GetLocations();
- if (instruction->IsCurrentMethod()) {
- Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
- } else if (locations != nullptr && locations->Out().Equals(location)) {
- return;
- } else if (locations != nullptr && locations->Out().IsConstant()) {
- HConstant* const_to_move = locations->Out().GetConstant();
- if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
- int32_t value = GetInt32ValueOf(const_to_move);
- if (location.IsRegister()) {
- __ LoadImmediate(location.AsRegister<Register>(), value);
- } else {
- DCHECK(location.IsStackSlot());
- __ LoadImmediate(IP, value);
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- }
- } else {
- DCHECK(const_to_move->IsLongConstant()) << const_to_move->DebugName();
- int64_t value = const_to_move->AsLongConstant()->GetValue();
- if (location.IsRegisterPair()) {
- __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
- __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
- } else {
- DCHECK(location.IsDoubleStackSlot());
- __ LoadImmediate(IP, Low32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetStackIndex());
- __ LoadImmediate(IP, High32Bits(value));
- __ StoreToOffset(kStoreWord, IP, SP, location.GetHighStackIndex(kArmWordSize));
- }
- }
- } else if (instruction->IsLoadLocal()) {
- uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
- switch (instruction->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimFloat:
- Move32(location, Location::StackSlot(stack_slot));
- break;
-
- case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
- Move64(location, Location::DoubleStackSlot(stack_slot));
- break;
-
- default:
- LOG(FATAL) << "Unexpected type " << instruction->GetType();
- }
- } else if (instruction->IsTemporary()) {
- Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- if (temp_location.IsStackSlot()) {
- Move32(location, temp_location);
- } else {
- DCHECK(temp_location.IsDoubleStackSlot());
- Move64(location, temp_location);
- }
- } else {
- DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
- switch (instruction->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimNot:
- case Primitive::kPrimInt:
- case Primitive::kPrimFloat:
- Move32(location, locations->Out());
- break;
-
- case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
- Move64(location, locations->Out());
- break;
-
- default:
- LOG(FATAL) << "Unexpected type " << instruction->GetType();
- }
- }
-}
-
void CodeGeneratorARM::MoveConstant(Location location, int32_t value) {
DCHECK(location.IsRegister());
__ LoadImmediate(location.AsRegister<Register>(), value);
@@ -2163,6 +2079,8 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to byte is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimShort:
@@ -2181,6 +2099,8 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2265,6 +2185,8 @@ void LocationsBuilderARM::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2364,6 +2286,10 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to byte is a result of code transformations.
+ __ sbfx(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>(), 0, 8);
+ break;
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimShort:
@@ -2381,6 +2307,10 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
+ __ sbfx(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>(), 0, 16);
+ break;
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2482,6 +2412,10 @@ void InstructionCodeGeneratorARM::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
+ __ ubfx(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>(), 0, 16);
+ break;
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -4933,14 +4867,6 @@ void CodeGeneratorARM::MarkGCCard(Register temp,
}
}
-void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
- temp->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
- // Nothing to do, this is driven by the code generator.
-}
-
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
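
The ARM hunks above lower the new long-to-byte, long-to-short and long-to-char conversions by operating only on the low register of the input pair, using SBFX for the signed cases and UBFX for the unsigned one. A minimal C++ sketch of what those instructions compute, assuming the usual two's-complement narrowing (illustrative only; the helper names are invented and nothing here is ART code):

#include <cassert>
#include <cstdint>

// Each helper mirrors one emitted instruction; the high register of the
// input pair is simply ignored.
int32_t LongToByte(int64_t in)  { return static_cast<int8_t>(in);   }  // sbfx out, in_lo, #0, #8
int32_t LongToShort(int64_t in) { return static_cast<int16_t>(in);  }  // sbfx out, in_lo, #0, #16
int32_t LongToChar(int64_t in)  { return static_cast<uint16_t>(in); }  // ubfx out, in_lo, #0, #16

int main() {
  int64_t v = 0x1234567890ABCDEFLL;
  assert(LongToByte(v)  == -17);     // 0xEF, sign-extended
  assert(LongToShort(v) == -12817);  // 0xCDEF, sign-extended
  assert(LongToChar(v)  == 0xCDEF);  // 0xCDEF, zero-extended
  return 0;
}
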
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 558c9cf0e4..cfd7a3bc14 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -307,7 +307,6 @@ class CodeGeneratorARM : public CodeGenerator {
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
void Bind(HBasicBlock* block) OVERRIDE;
- void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index e20e04400f..435ae5e954 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1066,54 +1066,6 @@ void CodeGeneratorARM64::Bind(HBasicBlock* block) {
__ Bind(GetLabelOf(block));
}
-void CodeGeneratorARM64::Move(HInstruction* instruction,
- Location location,
- HInstruction* move_for) {
- LocationSummary* locations = instruction->GetLocations();
- Primitive::Type type = instruction->GetType();
- DCHECK_NE(type, Primitive::kPrimVoid);
-
- if (instruction->IsCurrentMethod()) {
- MoveLocation(location,
- Location::DoubleStackSlot(kCurrentMethodStackOffset),
- Primitive::kPrimVoid);
- } else if (locations != nullptr && locations->Out().Equals(location)) {
- return;
- } else if (instruction->IsIntConstant()
- || instruction->IsLongConstant()
- || instruction->IsNullConstant()) {
- int64_t value = GetInt64ValueOf(instruction->AsConstant());
- if (location.IsRegister()) {
- Register dst = RegisterFrom(location, type);
- DCHECK(((instruction->IsIntConstant() || instruction->IsNullConstant()) && dst.Is32Bits()) ||
- (instruction->IsLongConstant() && dst.Is64Bits()));
- __ Mov(dst, value);
- } else {
- DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
- UseScratchRegisterScope temps(GetVIXLAssembler());
- Register temp = (instruction->IsIntConstant() || instruction->IsNullConstant())
- ? temps.AcquireW()
- : temps.AcquireX();
- __ Mov(temp, value);
- __ Str(temp, StackOperandFrom(location));
- }
- } else if (instruction->IsTemporary()) {
- Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- MoveLocation(location, temp_location, type);
- } else if (instruction->IsLoadLocal()) {
- uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
- if (Primitive::Is64BitType(type)) {
- MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
- } else {
- MoveLocation(location, Location::StackSlot(stack_slot), type);
- }
-
- } else {
- DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
- MoveLocation(location, locations->Out(), type);
- }
-}
-
void CodeGeneratorARM64::MoveConstant(Location location, int32_t value) {
DCHECK(location.IsRegister());
__ Mov(RegisterFrom(location, Primitive::kPrimInt), value);
@@ -2976,30 +2928,128 @@ void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
/* false_target */ nullptr);
}
-void LocationsBuilderARM64::VisitSelect(HSelect* select) {
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+enum SelectVariant {
+ kCsel,
+ kCselFalseConst,
+ kCselTrueConst,
+ kFcsel,
+};
+
+static inline bool IsConditionOnFloatingPointValues(HInstruction* condition) {
+ return condition->IsCondition() &&
+ Primitive::IsFloatingPointType(condition->InputAt(0)->GetType());
+}
+
+static inline bool IsRecognizedCselConstant(HInstruction* constant) {
+ if (constant->IsConstant()) {
+ int64_t value = Int64FromConstant(constant->AsConstant());
+ if ((value == -1) || (value == 0) || (value == 1)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline SelectVariant GetSelectVariant(HSelect* select) {
if (Primitive::IsFloatingPointType(select->GetType())) {
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ return kFcsel;
+ } else if (IsRecognizedCselConstant(select->GetFalseValue())) {
+ return kCselFalseConst;
+ } else if (IsRecognizedCselConstant(select->GetTrueValue())) {
+ return kCselTrueConst;
} else {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ return kCsel;
+ }
+}
+
+static inline bool HasSwappedInputs(SelectVariant variant) {
+ return variant == kCselTrueConst;
+}
+
+static inline Condition GetConditionForSelect(HCondition* condition, SelectVariant variant) {
+ IfCondition cond = HasSwappedInputs(variant) ? condition->GetOppositeCondition()
+ : condition->GetCondition();
+ return IsConditionOnFloatingPointValues(condition) ? ARM64FPCondition(cond, condition->IsGtBias())
+ : ARM64Condition(cond);
+}
+
+void LocationsBuilderARM64::VisitSelect(HSelect* select) {
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
+ switch (GetSelectVariant(select)) {
+ case kCsel:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ case kCselFalseConst:
+ locations->SetInAt(0, Location::ConstantLocation(select->InputAt(0)->AsConstant()));
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ case kCselTrueConst:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::ConstantLocation(select->InputAt(1)->AsConstant()));
+ locations->SetOut(Location::RequiresRegister());
+ break;
+ case kFcsel:
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ break;
}
if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) {
locations->SetInAt(2, Location::RequiresRegister());
}
- locations->SetOut(Location::SameAsFirstInput());
}
void InstructionCodeGeneratorARM64::VisitSelect(HSelect* select) {
- LocationSummary* locations = select->GetLocations();
- vixl::Label false_target;
- GenerateTestAndBranch(select,
- /* condition_input_index */ 2,
- /* true_target */ nullptr,
- &false_target);
- codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
- __ Bind(&false_target);
+ HInstruction* cond = select->GetCondition();
+ SelectVariant variant = GetSelectVariant(select);
+ Condition csel_cond;
+
+ if (IsBooleanValueOrMaterializedCondition(cond)) {
+ if (cond->IsCondition() && cond->GetNext() == select) {
+ // Condition codes set from previous instruction.
+ csel_cond = GetConditionForSelect(cond->AsCondition(), variant);
+ } else {
+ __ Cmp(InputRegisterAt(select, 2), 0);
+ csel_cond = HasSwappedInputs(variant) ? eq : ne;
+ }
+ } else if (IsConditionOnFloatingPointValues(cond)) {
+ Location rhs = cond->GetLocations()->InAt(1);
+ if (rhs.IsConstant()) {
+ DCHECK(IsFloatingPointZeroConstant(rhs.GetConstant()));
+ __ Fcmp(InputFPRegisterAt(cond, 0), 0.0);
+ } else {
+ __ Fcmp(InputFPRegisterAt(cond, 0), InputFPRegisterAt(cond, 1));
+ }
+ csel_cond = GetConditionForSelect(cond->AsCondition(), variant);
+ } else {
+ __ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
+ csel_cond = GetConditionForSelect(cond->AsCondition(), variant);
+ }
+
+ switch (variant) {
+ case kCsel:
+ case kCselFalseConst:
+ __ Csel(OutputRegister(select),
+ InputRegisterAt(select, 1),
+ InputOperandAt(select, 0),
+ csel_cond);
+ break;
+ case kCselTrueConst:
+ __ Csel(OutputRegister(select),
+ InputRegisterAt(select, 0),
+ InputOperandAt(select, 1),
+ csel_cond);
+ break;
+ case kFcsel:
+ __ Fcsel(OutputFPRegister(select),
+ InputFPRegisterAt(select, 1),
+ InputFPRegisterAt(select, 0),
+ csel_cond);
+ break;
+ }
}
void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
@@ -4445,14 +4495,6 @@ void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction
GenerateSuspendCheck(instruction, nullptr);
}
-void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
- temp->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
- // Nothing to do, this is driven by the code generator.
-}
-
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
@@ -4880,20 +4922,18 @@ void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction*
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
- temp2 = temps.AcquireW();
// /* HeapReference<Object> */ ref =
// *(obj + offset + index * sizeof(HeapReference<Object>))
- MemOperand source = HeapOperand(obj);
+ const size_t shift_amount = Primitive::ComponentSizeShift(type);
if (index.IsConstant()) {
- uint32_t computed_offset =
- offset + (Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type));
- source = HeapOperand(obj, computed_offset);
+ uint32_t computed_offset = offset + (Int64ConstantFrom(index) << shift_amount);
+ Load(type, ref_reg, HeapOperand(obj, computed_offset));
} else {
+ temp2 = temps.AcquireW();
__ Add(temp2, obj, offset);
- source = HeapOperand(temp2, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
+ Load(type, ref_reg, HeapOperand(temp2, XRegisterFrom(index), LSL, shift_amount));
+ temps.Release(temp2);
}
- Load(type, ref_reg, source);
- temps.Release(temp2);
} else {
// /* HeapReference<Object> */ ref = *(obj + offset)
MemOperand field = HeapOperand(obj, offset);
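
The rewritten VisitSelect above classifies each HSelect up front: floating-point selects use FCSEL, integer selects use CSEL, and when the false or true value is a recognized constant (-1, 0 or 1) it is passed as the csel operand. When the constant sits on the true side (kCselTrueConst), the inputs are swapped and the condition inverted, which is what HasSwappedInputs() and GetOppositeCondition() express. A rough C++ model of that swap, with invented names and none of the ART/VIXL types:

#include <cassert>
#include <cstdint>

// csel rd, rn, operand, cond  ==  rd = cond ? rn : operand
int64_t Csel(bool cond, int64_t rn, int64_t operand) {
  return cond ? rn : operand;
}

// Select(cond, true_const, false_reg) with the constant on the true side:
// keep the register value in the rn slot and compensate by inverting cond.
int64_t SelectTrueConst(bool cond, int64_t true_const, int64_t false_reg) {
  return Csel(!cond, false_reg, true_const);
}

int main() {
  assert(SelectTrueConst(true,  /* true_const */ 1, /* false_reg */ 42) == 1);
  assert(SelectTrueConst(false, /* true_const */ 1, /* false_reg */ 42) == 42);
  return 0;
}
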
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a9d1bbde98..360488eb4a 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -350,8 +350,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
return CommonGetLabelOf<vixl::Label>(block_labels_, block);
}
- void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
-
size_t GetWordSize() const OVERRIDE {
return kArm64WordSize;
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index e9c0b6ae79..3eda8639c1 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -976,46 +976,6 @@ void CodeGeneratorMIPS::MoveConstant(Location destination, int32_t value) {
__ LoadConst32(dst, value);
}
-void CodeGeneratorMIPS::Move(HInstruction* instruction,
- Location location,
- HInstruction* move_for) {
- LocationSummary* locations = instruction->GetLocations();
- Primitive::Type type = instruction->GetType();
- DCHECK_NE(type, Primitive::kPrimVoid);
-
- if (instruction->IsCurrentMethod()) {
- Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
- } else if (locations != nullptr && locations->Out().Equals(location)) {
- return;
- } else if (instruction->IsIntConstant()
- || instruction->IsLongConstant()
- || instruction->IsNullConstant()) {
- MoveConstant(location, instruction->AsConstant());
- } else if (instruction->IsTemporary()) {
- Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- if (temp_location.IsStackSlot()) {
- Move32(location, temp_location);
- } else {
- DCHECK(temp_location.IsDoubleStackSlot());
- Move64(location, temp_location);
- }
- } else if (instruction->IsLoadLocal()) {
- uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
- if (Primitive::Is64BitType(type)) {
- Move64(location, Location::DoubleStackSlot(stack_slot));
- } else {
- Move32(location, Location::StackSlot(stack_slot));
- }
- } else {
- DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
- if (Primitive::Is64BitType(type)) {
- Move64(location, locations->Out());
- } else {
- Move32(location, locations->Out());
- }
- }
-}
-
void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
if (location.IsRegister()) {
locations->AddTemp(location);
@@ -4795,14 +4755,6 @@ void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction)
GenerateSuspendCheck(instruction, nullptr);
}
-void LocationsBuilderMIPS::VisitTemporary(HTemporary* temp) {
- temp->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
- // Nothing to do, this is driven by the code generator.
-}
-
void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 2cde0ed90b..12964b0b6a 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -268,7 +268,6 @@ class CodeGeneratorMIPS : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
- void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void Move32(Location destination, Location source);
void Move64(Location destination, Location source);
void MoveConstant(Location location, HConstant* c);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index da98a89f65..119084e026 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -869,65 +869,6 @@ void CodeGeneratorMIPS64::SwapLocations(Location loc1, Location loc2, Primitive:
}
}
-void CodeGeneratorMIPS64::Move(HInstruction* instruction,
- Location location,
- HInstruction* move_for) {
- LocationSummary* locations = instruction->GetLocations();
- Primitive::Type type = instruction->GetType();
- DCHECK_NE(type, Primitive::kPrimVoid);
-
- if (instruction->IsCurrentMethod()) {
- MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
- } else if (locations != nullptr && locations->Out().Equals(location)) {
- return;
- } else if (instruction->IsIntConstant()
- || instruction->IsLongConstant()
- || instruction->IsNullConstant()) {
- if (location.IsRegister()) {
- // Move to GPR from constant
- GpuRegister dst = location.AsRegister<GpuRegister>();
- if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
- __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
- } else {
- __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
- }
- } else {
- DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
- // Move to stack from constant
- GpuRegister gpr = ZERO;
- if (location.IsStackSlot()) {
- int32_t value = GetInt32ValueOf(instruction->AsConstant());
- if (value != 0) {
- gpr = TMP;
- __ LoadConst32(gpr, value);
- }
- __ StoreToOffset(kStoreWord, gpr, SP, location.GetStackIndex());
- } else {
- DCHECK(location.IsDoubleStackSlot());
- int64_t value = instruction->AsLongConstant()->GetValue();
- if (value != 0) {
- gpr = TMP;
- __ LoadConst64(gpr, value);
- }
- __ StoreToOffset(kStoreDoubleword, gpr, SP, location.GetStackIndex());
- }
- }
- } else if (instruction->IsTemporary()) {
- Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- MoveLocation(location, temp_location, type);
- } else if (instruction->IsLoadLocal()) {
- uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
- if (Primitive::Is64BitType(type)) {
- MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
- } else {
- MoveLocation(location, Location::StackSlot(stack_slot), type);
- }
- } else {
- DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
- MoveLocation(location, locations->Out(), type);
- }
-}
-
void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
DCHECK(location.IsRegister());
__ LoadConst32(location.AsRegister<GpuRegister>(), value);
@@ -3946,14 +3887,6 @@ void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instructio
GenerateSuspendCheck(instruction, nullptr);
}
-void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
- temp->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
- // Nothing to do, this is driven by the code generator.
-}
-
void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
@@ -4010,18 +3943,26 @@ void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conver
__ Andi(dst, src, 0xFFFF);
break;
case Primitive::kPrimByte:
- // long is never converted into types narrower than int directly,
- // so SEB and SEH can be used without ever causing unpredictable results
- // on 64-bit inputs
- DCHECK(input_type != Primitive::kPrimLong);
- __ Seb(dst, src);
+ if (input_type == Primitive::kPrimLong) {
+ // Type conversion from long to types narrower than int is a result of code
+ // transformations. To avoid unpredictable results for SEB and SEH, we first
+ // need to sign-extend the low 32-bit value into bits 32 through 63.
+ __ Sll(dst, src, 0);
+ __ Seb(dst, dst);
+ } else {
+ __ Seb(dst, src);
+ }
break;
case Primitive::kPrimShort:
- // long is never converted into types narrower than int directly,
- // so SEB and SEH can be used without ever causing unpredictable results
- // on 64-bit inputs
- DCHECK(input_type != Primitive::kPrimLong);
- __ Seh(dst, src);
+ if (input_type == Primitive::kPrimLong) {
+ // Type conversion from long to types narrower than int is a result of code
+ // transformations. To avoid unpredictable results for SEB and SEH, we first
+ // need to sign-extend the low 32-bit value into bits 32 through 63.
+ __ Sll(dst, src, 0);
+ __ Seh(dst, dst);
+ } else {
+ __ Seh(dst, src);
+ }
break;
case Primitive::kPrimInt:
case Primitive::kPrimLong:
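
The MIPS64 change above follows from an ISA constraint: SEB and SEH are 32-bit operations whose input must already be a canonically sign-extended 32-bit value, otherwise the result is unpredictable, which is why a long input is first run through Sll(dst, src, 0). A small C++ sketch of that precondition and of what the extra shift establishes (names invented, assuming two's-complement narrowing; not ART code):

#include <cassert>
#include <cstdint>

// A MIPS64 register holds a canonical 32-bit value when bits 32..63 are
// copies of bit 31; that is what SEB/SEH expect of their operand.
bool IsCanonicalInt32(uint64_t reg) {
  return reg == static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(reg)));
}

// SLL dst, src, 0 is a 32-bit shift, so it rewrites the register into
// exactly that canonical form before SEB/SEH run on it.
uint64_t SllZero(uint64_t src) {
  return static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(src)));
}

int main() {
  uint64_t long_input = 0x00000001FFFFFFF0ull;    // arbitrary 64-bit value, not canonical
  assert(!IsCanonicalInt32(long_input));
  assert(IsCanonicalInt32(SllZero(long_input)));  // now safe to feed to SEB/SEH
  return 0;
}
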
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index c836f837de..1161253792 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -268,8 +268,6 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
void Bind(HBasicBlock* block) OVERRIDE;
- void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
-
size_t GetWordSize() const OVERRIDE { return kMips64DoublewordSize; }
size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMips64DoublewordSize; }
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index de62010102..07edd97c1f 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1127,91 +1127,6 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
}
}
-void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
- LocationSummary* locations = instruction->GetLocations();
- if (instruction->IsCurrentMethod()) {
- Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
- } else if (locations != nullptr && locations->Out().Equals(location)) {
- return;
- } else if (locations != nullptr && locations->Out().IsConstant()) {
- HConstant* const_to_move = locations->Out().GetConstant();
- if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
- Immediate imm(GetInt32ValueOf(const_to_move));
- if (location.IsRegister()) {
- __ movl(location.AsRegister<Register>(), imm);
- } else if (location.IsStackSlot()) {
- __ movl(Address(ESP, location.GetStackIndex()), imm);
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), const_to_move);
- }
- } else if (const_to_move->IsLongConstant()) {
- int64_t value = const_to_move->AsLongConstant()->GetValue();
- if (location.IsRegisterPair()) {
- __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
- __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
- } else if (location.IsDoubleStackSlot()) {
- __ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
- __ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)),
- Immediate(High32Bits(value)));
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), instruction);
- }
- }
- } else if (instruction->IsTemporary()) {
- Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- if (temp_location.IsStackSlot()) {
- Move32(location, temp_location);
- } else {
- DCHECK(temp_location.IsDoubleStackSlot());
- Move64(location, temp_location);
- }
- } else if (instruction->IsLoadLocal()) {
- int slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
- switch (instruction->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimFloat:
- Move32(location, Location::StackSlot(slot));
- break;
-
- case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
- Move64(location, Location::DoubleStackSlot(slot));
- break;
-
- default:
- LOG(FATAL) << "Unimplemented local type " << instruction->GetType();
- }
- } else {
- DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
- switch (instruction->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimFloat:
- Move32(location, locations->Out());
- break;
-
- case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
- Move64(location, locations->Out());
- break;
-
- default:
- LOG(FATAL) << "Unexpected type " << instruction->GetType();
- }
- }
-}
-
void CodeGeneratorX86::MoveConstant(Location location, int32_t value) {
DCHECK(location.IsRegister());
__ movl(location.AsRegister<Register>(), Immediate(value));
@@ -2230,6 +2145,18 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimLong: {
+ // Type conversion from long to byte is a result of code transformations.
+ HInstruction* input = conversion->InputAt(0);
+ Location input_location = input->IsConstant()
+ ? Location::ConstantLocation(input->AsConstant())
+ : Location::RegisterPairLocation(EAX, EDX);
+ locations->SetInAt(0, input_location);
+ // Make the output overlap to please the register allocator. This greatly simplifies
+ // the validation of the linear scan implementation.
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+ break;
+ }
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimShort:
@@ -2250,6 +2177,8 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2327,6 +2256,8 @@ void LocationsBuilderX86::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2421,6 +2352,16 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to byte is a result of code transformations.
+ if (in.IsRegisterPair()) {
+ __ movsxb(out.AsRegister<Register>(), in.AsRegisterPairLow<ByteRegister>());
+ } else {
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<int8_t>(value)));
+ }
+ break;
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimShort:
@@ -2444,6 +2385,18 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
+ if (in.IsRegisterPair()) {
+ __ movsxw(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movsxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<int16_t>(value)));
+ }
+ break;
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2580,6 +2533,18 @@ void InstructionCodeGeneratorX86::VisitTypeConversion(HTypeConversion* conversio
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
+ if (in.IsRegisterPair()) {
+ __ movzxw(out.AsRegister<Register>(), in.AsRegisterPairLow<Register>());
+ } else if (in.IsDoubleStackSlot()) {
+ __ movzxw(out.AsRegister<Register>(), Address(ESP, in.GetStackIndex()));
+ } else {
+ DCHECK(in.GetConstant()->IsLongConstant());
+ int64_t value = in.GetConstant()->AsLongConstant()->GetValue();
+ __ movl(out.AsRegister<Register>(), Immediate(static_cast<uint16_t>(value)));
+ }
+ break;
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -5513,14 +5478,6 @@ void InstructionCodeGeneratorX86::VisitBoundsCheck(HBoundsCheck* instruction) {
}
}
-void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
- temp->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
- // Nothing to do, this is driven by the code generator.
-}
-
void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 45e8ffa84f..2fb6d60ad5 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -317,7 +317,6 @@ class CodeGeneratorX86 : public CodeGenerator {
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
void Bind(HBasicBlock* block) OVERRIDE;
- void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 99396cd983..a53a6be3de 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1206,82 +1206,6 @@ void CodeGeneratorX86_64::Move(Location destination, Location source) {
}
}
-void CodeGeneratorX86_64::Move(HInstruction* instruction,
- Location location,
- HInstruction* move_for) {
- LocationSummary* locations = instruction->GetLocations();
- if (instruction->IsCurrentMethod()) {
- Move(location, Location::DoubleStackSlot(kCurrentMethodStackOffset));
- } else if (locations != nullptr && locations->Out().Equals(location)) {
- return;
- } else if (locations != nullptr && locations->Out().IsConstant()) {
- HConstant* const_to_move = locations->Out().GetConstant();
- if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
- Immediate imm(GetInt32ValueOf(const_to_move));
- if (location.IsRegister()) {
- __ movl(location.AsRegister<CpuRegister>(), imm);
- } else if (location.IsStackSlot()) {
- __ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), const_to_move);
- }
- } else if (const_to_move->IsLongConstant()) {
- int64_t value = const_to_move->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- Load64BitValue(location.AsRegister<CpuRegister>(), value);
- } else if (location.IsDoubleStackSlot()) {
- Store64BitValueToStack(location, value);
- } else {
- DCHECK(location.IsConstant());
- DCHECK_EQ(location.GetConstant(), const_to_move);
- }
- }
- } else if (instruction->IsLoadLocal()) {
- switch (instruction->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimFloat:
- Move(location, Location::StackSlot(GetStackSlot(instruction->AsLoadLocal()->GetLocal())));
- break;
-
- case Primitive::kPrimLong:
- case Primitive::kPrimDouble:
- Move(location,
- Location::DoubleStackSlot(GetStackSlot(instruction->AsLoadLocal()->GetLocal())));
- break;
-
- default:
- LOG(FATAL) << "Unexpected local type " << instruction->GetType();
- }
- } else if (instruction->IsTemporary()) {
- Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- Move(location, temp_location);
- } else {
- DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
- switch (instruction->GetType()) {
- case Primitive::kPrimBoolean:
- case Primitive::kPrimByte:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot:
- case Primitive::kPrimLong:
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- Move(location, locations->Out());
- break;
-
- default:
- LOG(FATAL) << "Unexpected type " << instruction->GetType();
- }
- }
-}
-
void CodeGeneratorX86_64::MoveConstant(Location location, int32_t value) {
DCHECK(location.IsRegister());
Load64BitValue(location.AsRegister<CpuRegister>(), static_cast<int64_t>(value));
@@ -1627,14 +1551,16 @@ void LocationsBuilderX86_64::VisitSelect(HSelect* select) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
if (Primitive::IsFloatingPointType(select->GetType())) {
locations->SetInAt(0, Location::RequiresFpuRegister());
- // Since we can't use CMOV, there is no need to force 'true' into a register.
locations->SetInAt(1, Location::Any());
} else {
locations->SetInAt(0, Location::RequiresRegister());
if (SelectCanUseCMOV(select)) {
- locations->SetInAt(1, Location::RequiresRegister());
+ if (select->InputAt(1)->IsConstant()) {
+ locations->SetInAt(1, Location::RequiresRegister());
+ } else {
+ locations->SetInAt(1, Location::Any());
+ }
} else {
- // Since we can't use CMOV, there is no need to force 'true' into a register.
locations->SetInAt(1, Location::Any());
}
}
@@ -1650,7 +1576,7 @@ void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) {
// If both the condition and the source types are integer, we can generate
// a CMOV to implement Select.
CpuRegister value_false = locations->InAt(0).AsRegister<CpuRegister>();
- CpuRegister value_true = locations->InAt(1).AsRegister<CpuRegister>();
+ Location value_true_loc = locations->InAt(1);
DCHECK(locations->InAt(0).Equals(locations->Out()));
HInstruction* select_condition = select->GetCondition();
@@ -1682,7 +1608,14 @@ void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) {
// If the condition is true, overwrite the output, which already contains false.
// Generate the correct sized CMOV.
- __ cmov(cond, value_false, value_true, select->GetType() == Primitive::kPrimLong);
+ bool is_64_bit = Primitive::Is64BitType(select->GetType());
+ if (value_true_loc.IsRegister()) {
+ __ cmov(cond, value_false, value_true_loc.AsRegister<CpuRegister>(), is_64_bit);
+ } else {
+ __ cmov(cond,
+ value_false,
+ Address(CpuRegister(RSP), value_true_loc.GetStackIndex()), is_64_bit);
+ }
} else {
NearLabel false_target;
GenerateTestAndBranch<NearLabel>(select,
@@ -2439,6 +2372,8 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to byte is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimShort:
@@ -2457,6 +2392,8 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2534,6 +2471,8 @@ void LocationsBuilderX86_64::VisitTypeConversion(HTypeConversion* conversion) {
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2628,6 +2567,8 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
switch (result_type) {
case Primitive::kPrimByte:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to byte is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimShort:
@@ -2636,13 +2577,12 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
// Processing a Dex `int-to-byte' instruction.
if (in.IsRegister()) {
__ movsxb(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
- } else if (in.IsStackSlot()) {
+ } else if (in.IsStackSlot() || in.IsDoubleStackSlot()) {
__ movsxb(out.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), in.GetStackIndex()));
} else {
- DCHECK(in.GetConstant()->IsIntConstant());
__ movl(out.AsRegister<CpuRegister>(),
- Immediate(static_cast<int8_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ Immediate(static_cast<int8_t>(Int64FromConstant(in.GetConstant()))));
}
break;
@@ -2654,6 +2594,8 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimShort:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to short is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2662,13 +2604,12 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
// Processing a Dex `int-to-short' instruction.
if (in.IsRegister()) {
__ movsxw(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
- } else if (in.IsStackSlot()) {
+ } else if (in.IsStackSlot() || in.IsDoubleStackSlot()) {
__ movsxw(out.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), in.GetStackIndex()));
} else {
- DCHECK(in.GetConstant()->IsIntConstant());
__ movl(out.AsRegister<CpuRegister>(),
- Immediate(static_cast<int16_t>(in.GetConstant()->AsIntConstant()->GetValue())));
+ Immediate(static_cast<int16_t>(Int64FromConstant(in.GetConstant()))));
}
break;
@@ -2811,6 +2752,8 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
case Primitive::kPrimChar:
switch (input_type) {
+ case Primitive::kPrimLong:
+ // Type conversion from long to char is a result of code transformations.
case Primitive::kPrimBoolean:
// Boolean input is a result of code transformations.
case Primitive::kPrimByte:
@@ -2819,14 +2762,12 @@ void InstructionCodeGeneratorX86_64::VisitTypeConversion(HTypeConversion* conver
// Processing a Dex `int-to-char' instruction.
if (in.IsRegister()) {
__ movzxw(out.AsRegister<CpuRegister>(), in.AsRegister<CpuRegister>());
- } else if (in.IsStackSlot()) {
+ } else if (in.IsStackSlot() || in.IsDoubleStackSlot()) {
__ movzxw(out.AsRegister<CpuRegister>(),
Address(CpuRegister(RSP), in.GetStackIndex()));
} else {
- DCHECK(in.GetConstant()->IsIntConstant());
__ movl(out.AsRegister<CpuRegister>(),
- Immediate(static_cast<uint16_t>(
- in.GetConstant()->AsIntConstant()->GetValue())));
+ Immediate(static_cast<uint16_t>(Int64FromConstant(in.GetConstant()))));
}
break;
@@ -5142,14 +5083,6 @@ void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
}
}
-void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) {
- temp->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
- // Nothing to do, this is driven by the code generator.
-}
-
void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unimplemented";
}
@@ -5840,19 +5773,20 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
is_type_check_slow_path_fatal);
codegen_->AddSlowPath(type_check_slow_path);
- NearLabel done;
- // Avoid null check if we know obj is not null.
- if (instruction->MustDoNullCheck()) {
- __ testl(obj, obj);
- __ j(kEqual, &done);
- }
-
- // /* HeapReference<Class> */ temp = obj->klass_
- GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
-
switch (type_check_kind) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kArrayCheck: {
+ NearLabel done;
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, &done);
+ }
+
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(
+ instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
+
if (cls.IsRegister()) {
__ cmpl(temp, cls.AsRegister<CpuRegister>());
} else {
@@ -5862,10 +5796,22 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
// Jump to slow path for throwing the exception or doing a
// more involved array check.
__ j(kNotEqual, type_check_slow_path->GetEntryLabel());
+ __ Bind(&done);
break;
}
case TypeCheckKind::kAbstractClassCheck: {
+ NearLabel done;
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, &done);
+ }
+
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(
+ instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
+
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
NearLabel loop, compare_classes;
@@ -5896,10 +5842,22 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
__ cmpl(temp, Address(CpuRegister(RSP), cls.GetStackIndex()));
}
__ j(kNotEqual, &loop);
+ __ Bind(&done);
break;
}
case TypeCheckKind::kClassHierarchyCheck: {
+ NearLabel done;
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, &done);
+ }
+
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(
+ instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
+
// Walk over the class hierarchy to find a match.
NearLabel loop;
__ Bind(&loop);
@@ -5927,10 +5885,26 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
GenerateReferenceLoadTwoRegisters(
instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
+ __ Bind(&done);
break;
}
case TypeCheckKind::kArrayObjectCheck: {
+ // We cannot use a NearLabel here, as its range might be too
+ // short in some cases when read barriers are enabled. This has
+ // been observed for instance when the code emitted for this
+ // case uses high x86-64 registers (R8-R15).
+ Label done;
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, &done);
+ }
+
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(
+ instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
+
// Do an exact check.
NearLabel check_non_primitive_component_type;
if (cls.IsRegister()) {
@@ -5969,11 +5943,23 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
GenerateReferenceLoadTwoRegisters(
instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
__ jmp(type_check_slow_path->GetEntryLabel());
+ __ Bind(&done);
break;
}
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
+ NearLabel done;
+ // Avoid null check if we know obj is not null.
+ if (instruction->MustDoNullCheck()) {
+ __ testl(obj, obj);
+ __ j(kEqual, &done);
+ }
+
+ // /* HeapReference<Class> */ temp = obj->klass_
+ GenerateReferenceLoadTwoRegisters(
+ instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
+
// We always go into the type check slow path for the unresolved
// and interface check cases.
//
@@ -5992,9 +5978,9 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
// call to the runtime not using a type checking slow path).
// This should also be beneficial for the other cases above.
__ jmp(type_check_slow_path->GetEntryLabel());
+ __ Bind(&done);
break;
}
- __ Bind(&done);
__ Bind(type_check_slow_path->GetExitLabel());
}
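
Further up in this file, the x86-64 Select lowering stops forcing the true value into a register: when CMOV can be used and the true value is not a constant, it may stay in any location, and the memory form of CMOV reads it straight from its stack slot. A rough C++ analogue of that data flow (names invented, illustrative only):

#include <cassert>
#include <cstdint>

// The output register starts out holding the false value (InAt(0) == Out());
// if the condition holds, CMOV overwrites it with the true value, here read
// directly from memory, so the true value never needs a register of its own.
int64_t SelectViaCmov(bool cond, int64_t out_and_false,
                      const int64_t* true_value_slot) {
  if (cond) {
    out_and_false = *true_value_slot;  // cmovcc out, [rsp + offset]
  }
  return out_and_false;
}

int main() {
  int64_t spilled_true = 7;
  assert(SelectViaCmov(true, 3, &spilled_true) == 7);
  assert(SelectViaCmov(false, 3, &spilled_true) == 3);
  return 0;
}
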
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 72dddfddfa..97f6f84236 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -299,7 +299,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
void Bind(HBasicBlock* block) OVERRIDE;
- void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
void MoveConstant(Location destination, int32_t value) OVERRIDE;
void MoveLocation(Location dst, Location src, Primitive::Type dst_type) OVERRIDE;
void AddLocationAsTemp(Location location, LocationSummary* locations) OVERRIDE;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 322a577bbf..6be79fa75c 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -206,10 +206,13 @@ static void RunCode(CodeGenerator* codegen,
std::function<void(HGraph*)> hook_before_codegen,
bool has_result,
Expected expected) {
- ASSERT_TRUE(graph->IsInSsaForm());
-
- SSAChecker graph_checker(graph);
+ GraphChecker graph_checker(graph);
graph_checker.Run();
+ if (!graph_checker.IsValid()) {
+ for (auto error : graph_checker.GetErrors()) {
+ std::cout << error << std::endl;
+ }
+ }
ASSERT_TRUE(graph_checker.IsValid());
SsaLivenessAnalysis liveness(graph, codegen);
@@ -292,14 +295,9 @@ static void TestCode(const uint16_t* data,
for (InstructionSet target_isa : GetTargetISAs()) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- HGraph* graph = CreateGraph(&arena);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
+ HGraph* graph = CreateCFG(&arena, data);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
- TransformToSsa(graph);
RunCode(target_isa, graph, [](HGraph*) {}, has_result, expected);
}
}
@@ -310,14 +308,9 @@ static void TestCodeLong(const uint16_t* data,
for (InstructionSet target_isa : GetTargetISAs()) {
ArenaPool pool;
ArenaAllocator arena(&pool);
- HGraph* graph = CreateGraph(&arena);
- HGraphBuilder builder(graph, Primitive::kPrimLong);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
+ HGraph* graph = CreateCFG(&arena, data, Primitive::kPrimLong);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
- TransformToSsa(graph);
RunCode(target_isa, graph, [](HGraph*) {}, has_result, expected);
}
}
@@ -640,6 +633,7 @@ TEST_F(CodegenTest, NonMaterializedCondition) {
ArenaAllocator allocator(&pool);
HGraph* graph = CreateGraph(&allocator);
+
HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -672,7 +666,7 @@ TEST_F(CodegenTest, NonMaterializedCondition) {
else_block->AddInstruction(new (&allocator) HReturn(constant1));
ASSERT_FALSE(equal->IsEmittedAtUseSite());
- TransformToSsa(graph);
+ graph->BuildDominatorTree();
PrepareForRegisterAllocation(graph).Run();
ASSERT_TRUE(equal->IsEmittedAtUseSite());
@@ -723,7 +717,7 @@ TEST_F(CodegenTest, MaterializedCondition1) {
HReturn ret(&cmp_lt);
code_block->AddInstruction(&ret);
- TransformToSsa(graph);
+ graph->BuildDominatorTree();
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
@@ -777,9 +771,9 @@ TEST_F(CodegenTest, MaterializedCondition2) {
HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
HLessThan cmp_lt(cst_lhs, cst_rhs);
if_block->AddInstruction(&cmp_lt);
- // We insert a temporary to separate the HIf from the HLessThan and force
- // the materialization of the condition.
- HTemporary force_materialization(0);
+ // We insert a dummy instruction to separate the HIf from the HLessThan
+ // and force the materialization of the condition.
+ HMemoryBarrier force_materialization(MemBarrierKind::kAnyAny, 0);
if_block->AddInstruction(&force_materialization);
HIf if_lt(&cmp_lt);
if_block->AddInstruction(&if_lt);
@@ -791,7 +785,7 @@ TEST_F(CodegenTest, MaterializedCondition2) {
HReturn ret_ge(cst_ge);
if_false_block->AddInstruction(&ret_ge);
- TransformToSsa(graph);
+ graph->BuildDominatorTree();
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
@@ -907,7 +901,7 @@ static void TestComparison(IfCondition condition,
block->AddInstruction(comparison);
block->AddInstruction(new (&allocator) HReturn(comparison));
- TransformToSsa(graph);
+ graph->BuildDominatorTree();
RunCode(target_isa, graph, [](HGraph*) {}, true, expected_result);
}
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index a8f65bf516..9c69f8c75b 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -56,7 +56,6 @@ class ConstantFoldingTest : public CommonCompilerTest {
const std::string& expected_after_dce,
std::function<void(HGraph*)> check_after_cf) {
ASSERT_NE(graph_, nullptr);
- TransformToSsa(graph_);
StringPrettyPrinter printer_before(graph_);
printer_before.VisitInsertionOrder();
@@ -67,9 +66,9 @@ class ConstantFoldingTest : public CommonCompilerTest {
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph_, *features_x86.get(), CompilerOptions());
HConstantFolding(graph_).Run();
- SSAChecker ssa_checker_cf(graph_);
- ssa_checker_cf.Run();
- ASSERT_TRUE(ssa_checker_cf.IsValid());
+ GraphChecker graph_checker_cf(graph_);
+ graph_checker_cf.Run();
+ ASSERT_TRUE(graph_checker_cf.IsValid());
StringPrettyPrinter printer_after_cf(graph_);
printer_after_cf.VisitInsertionOrder();
@@ -79,9 +78,9 @@ class ConstantFoldingTest : public CommonCompilerTest {
check_after_cf(graph_);
HDeadCodeElimination(graph_).Run();
- SSAChecker ssa_checker_dce(graph_);
- ssa_checker_dce.Run();
- ASSERT_TRUE(ssa_checker_dce.IsValid());
+ GraphChecker graph_checker_dce(graph_);
+ graph_checker_dce.Run();
+ ASSERT_TRUE(graph_checker_dce.IsValid());
StringPrettyPrinter printer_after_dce(graph_);
printer_after_dce.VisitInsertionOrder();
@@ -775,76 +774,87 @@ TEST_F(ConstantFoldingTest, UnsignedComparisonsWithZero) {
HInstruction* zero = graph_->GetIntConstant(0);
HInstruction* last;
block->AddInstruction(last = new (&allocator_) HAbove(zero, parameter));
- block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
block->AddInstruction(last = new (&allocator_) HAbove(parameter, zero));
- block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
block->AddInstruction(last = new (&allocator_) HAboveOrEqual(zero, parameter));
- block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
block->AddInstruction(last = new (&allocator_) HAboveOrEqual(parameter, zero));
- block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
block->AddInstruction(last = new (&allocator_) HBelow(zero, parameter));
- block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
block->AddInstruction(last = new (&allocator_) HBelow(parameter, zero));
- block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
block->AddInstruction(last = new (&allocator_) HBelowOrEqual(zero, parameter));
- block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
block->AddInstruction(last = new (&allocator_) HBelowOrEqual(parameter, zero));
- block->AddInstruction(new (&allocator_) HDeoptimize(last, 0));
+ block->AddInstruction(new (&allocator_) HSelect(last, parameter, parameter, 0));
entry_block->AddInstruction(new (&allocator_) HGoto());
block->AddInstruction(new (&allocator_) HReturn(zero));
exit_block->AddInstruction(new (&allocator_) HExit());
+ graph_->BuildDominatorTree();
+
const std::string expected_before =
"BasicBlock 0, succ: 1\n"
- " 0: ParameterValue [16, 14, 12, 10, 8, 6, 4, 2]\n"
+ " 0: ParameterValue [17, 17, 16, 15, 15, 14, 13, 13, 12, 11, 11, 10, 9, 9, "
+ "8, 7, 7, 6, 5, 5, 4, 3, 3, 2]\n"
" 1: IntConstant [19, 16, 14, 12, 10, 8, 6, 4, 2]\n"
" 18: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 2\n"
" 2: Above(1, 0) [3]\n"
- " 3: Deoptimize(2)\n"
+ " 3: Select(0, 0, 2)\n"
" 4: Above(0, 1) [5]\n"
- " 5: Deoptimize(4)\n"
+ " 5: Select(0, 0, 4)\n"
" 6: AboveOrEqual(1, 0) [7]\n"
- " 7: Deoptimize(6)\n"
+ " 7: Select(0, 0, 6)\n"
" 8: AboveOrEqual(0, 1) [9]\n"
- " 9: Deoptimize(8)\n"
+ " 9: Select(0, 0, 8)\n"
" 10: Below(1, 0) [11]\n"
- " 11: Deoptimize(10)\n"
+ " 11: Select(0, 0, 10)\n"
" 12: Below(0, 1) [13]\n"
- " 13: Deoptimize(12)\n"
+ " 13: Select(0, 0, 12)\n"
" 14: BelowOrEqual(1, 0) [15]\n"
- " 15: Deoptimize(14)\n"
+ " 15: Select(0, 0, 14)\n"
" 16: BelowOrEqual(0, 1) [17]\n"
- " 17: Deoptimize(16)\n"
+ " 17: Select(0, 0, 16)\n"
" 19: Return(1)\n"
"BasicBlock 2, pred: 1\n"
" 20: Exit\n";
const std::string expected_after_cf =
"BasicBlock 0, succ: 1\n"
- " 0: ParameterValue [16, 10, 6, 4]\n"
+ " 0: ParameterValue [17, 17, 16, 15, 15, 13, 13, 11, 11, 10, 9, 9, 7, 7, 6, 5, 5, 4, 3, 3]\n"
" 1: IntConstant [13, 3, 19, 16, 10, 6, 4]\n"
" 21: IntConstant [15, 9]\n"
" 18: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 2\n"
- " 3: Deoptimize(1)\n"
+ " 3: Select(0, 0, 1)\n"
" 4: Above(0, 1) [5]\n"
- " 5: Deoptimize(4)\n"
+ " 5: Select(0, 0, 4)\n"
" 6: AboveOrEqual(1, 0) [7]\n"
- " 7: Deoptimize(6)\n"
- " 9: Deoptimize(21)\n"
+ " 7: Select(0, 0, 6)\n"
+ " 9: Select(0, 0, 21)\n"
" 10: Below(1, 0) [11]\n"
- " 11: Deoptimize(10)\n"
- " 13: Deoptimize(1)\n"
- " 15: Deoptimize(21)\n"
+ " 11: Select(0, 0, 10)\n"
+ " 13: Select(0, 0, 1)\n"
+ " 15: Select(0, 0, 21)\n"
" 16: BelowOrEqual(0, 1) [17]\n"
- " 17: Deoptimize(16)\n"
+ " 17: Select(0, 0, 16)\n"
" 19: Return(1)\n"
"BasicBlock 2, pred: 1\n"
" 20: Exit\n";
- const std::string expected_after_dce = expected_after_cf;
+ const std::string expected_after_dce =
+ "BasicBlock 0, succ: 1\n"
+ " 0: ParameterValue\n"
+ " 1: IntConstant [19]\n"
+ " 18: Goto 1\n"
+ "BasicBlock 1, pred: 0, succ: 2\n"
+ " 19: Return(1)\n"
+ "BasicBlock 2, pred: 1\n"
+ " 20: Exit\n";
auto check_after_cf = [](HGraph* graph) {
CHECK(graph != nullptr);
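For reference (not part of the patch): a minimal sketch of the HSelect shape used above. The two value operands occupy inputs 0 and 1 and the condition is stored as input 2, even though the constructor takes it first; that is why the pretty-printed form reads "Select(0, 0, 2)" for a select of parameter over itself on condition 2, and why GraphChecker::VisitSelect checks input index 2 as a boolean. Unlike Deoptimize, an unused Select is dead code, which is why expected_after_dce no longer equals expected_after_cf.

  HInstruction* cond = new (&allocator_) HAbove(zero, parameter);
  block->AddInstruction(cond);
  // HSelect(condition, true_value, false_value, dex_pc); the condition ends up at input 2.
  HSelect* select = new (&allocator_) HSelect(cond, parameter, parameter, /* dex_pc */ 0);
  block->AddInstruction(select);
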
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index f0f98efadb..930795b4f6 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -36,8 +36,6 @@ static void TestCode(const uint16_t* data,
HGraph* graph = CreateCFG(&allocator, data);
ASSERT_NE(graph, nullptr);
- TransformToSsa(graph);
-
StringPrettyPrinter printer_before(graph);
printer_before.VisitInsertionOrder();
std::string actual_before = printer_before.str();
@@ -47,9 +45,9 @@ static void TestCode(const uint16_t* data,
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
HDeadCodeElimination(graph).Run();
- SSAChecker ssa_checker(graph);
- ssa_checker.Run();
- ASSERT_TRUE(ssa_checker.IsValid());
+ GraphChecker graph_checker(graph);
+ graph_checker.Run();
+ ASSERT_TRUE(graph_checker.IsValid());
StringPrettyPrinter printer_after(graph);
printer_after.VisitInsertionOrder();
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index feb8b2092a..50c677adf5 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -24,15 +24,12 @@
namespace art {
+class OptimizerTest : public CommonCompilerTest {};
+
static void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
- graph->BuildDominatorTree();
+ HGraph* graph = CreateCFG(&allocator, data);
ASSERT_EQ(graph->GetBlocks().size(), blocks_length);
for (size_t i = 0, e = blocks_length; i < e; ++i) {
if (blocks[i] == kInvalidBlockId) {
@@ -50,7 +47,7 @@ static void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks
}
}
-TEST(OptimizerTest, ReturnVoid) {
+TEST_F(OptimizerTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID); // Block number 1
@@ -63,7 +60,7 @@ TEST(OptimizerTest, ReturnVoid) {
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG1) {
+TEST_F(OptimizerTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100, // Block number 1
Instruction::RETURN_VOID); // Block number 2
@@ -78,7 +75,7 @@ TEST(OptimizerTest, CFG1) {
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG2) {
+TEST_F(OptimizerTest, CFG2) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100, // Block number 1
Instruction::GOTO | 0x100, // Block number 2
@@ -95,7 +92,7 @@ TEST(OptimizerTest, CFG2) {
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG3) {
+TEST_F(OptimizerTest, CFG3) {
const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200, // Block number 1
Instruction::RETURN_VOID, // Block number 2
@@ -126,7 +123,7 @@ TEST(OptimizerTest, CFG3) {
TestCode(data3, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG4) {
+TEST_F(OptimizerTest, CFG4) {
const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
Instruction::GOTO | 0xFF00);
@@ -146,7 +143,7 @@ TEST(OptimizerTest, CFG4) {
TestCode(data2, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG5) {
+TEST_F(OptimizerTest, CFG5) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID, // Block number 1
Instruction::GOTO | 0x100, // Dead block
@@ -163,7 +160,7 @@ TEST(OptimizerTest, CFG5) {
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG6) {
+TEST_F(OptimizerTest, CFG6) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
@@ -182,7 +179,7 @@ TEST(OptimizerTest, CFG6) {
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG7) {
+TEST_F(OptimizerTest, CFG7) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3, // Block number 1
@@ -202,7 +199,7 @@ TEST(OptimizerTest, CFG7) {
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG8) {
+TEST_F(OptimizerTest, CFG8) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3, // Block number 1
@@ -223,7 +220,7 @@ TEST(OptimizerTest, CFG8) {
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG9) {
+TEST_F(OptimizerTest, CFG9) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3, // Block number 1
@@ -244,7 +241,7 @@ TEST(OptimizerTest, CFG9) {
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
}
-TEST(OptimizerTest, CFG10) {
+TEST_F(OptimizerTest, CFG10) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6, // Block number 1
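For reference (not part of the patch): these tests switch from TEST to TEST_F because graph construction now goes through the shared CreateCFG() helper, which builds the SSA graph directly and, presumably for that reason, needs the runtime environment that the CommonCompilerTest fixture brings up. A rough sketch of what the helper centralizes, simply mirroring the hand-rolled code deleted above (the real helper lives in optimizing_unit_test.h and its exact signature may differ):

  // Hypothetical stand-in for CreateCFG(); mirrors the deleted per-test boilerplate.
  static HGraph* BuildGraphFromDex(ArenaAllocator* allocator, const uint16_t* data) {
    HGraph* graph = CreateGraph(allocator);
    HGraphBuilder builder(graph);
    const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
    if (!builder.BuildGraph(*item)) {
      return nullptr;
    }
    graph->BuildDominatorTree();
    return graph;
  }
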
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index 4770fa2eca..04789d9a2d 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -27,16 +27,9 @@
namespace art {
-static HGraph* TestCode(const uint16_t* data, ArenaAllocator* allocator) {
- HGraph* graph = CreateGraph(allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- builder.BuildGraph(*item);
- graph->BuildDominatorTree();
- return graph;
-}
+class FindLoopsTest : public CommonCompilerTest {};
-TEST(FindLoopsTest, CFG1) {
+TEST_F(FindLoopsTest, CFG1) {
// Constant is not used.
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
@@ -44,26 +37,26 @@ TEST(FindLoopsTest, CFG1) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
}
-TEST(FindLoopsTest, CFG2) {
+TEST_F(FindLoopsTest, CFG2) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
}
-TEST(FindLoopsTest, CFG3) {
+TEST_F(FindLoopsTest, CFG3) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
@@ -73,13 +66,13 @@ TEST(FindLoopsTest, CFG3) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
}
-TEST(FindLoopsTest, CFG4) {
+TEST_F(FindLoopsTest, CFG4) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
@@ -90,13 +83,13 @@ TEST(FindLoopsTest, CFG4) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
}
-TEST(FindLoopsTest, CFG5) {
+TEST_F(FindLoopsTest, CFG5) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
@@ -105,7 +98,7 @@ TEST(FindLoopsTest, CFG5) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
for (HBasicBlock* block : graph->GetBlocks()) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
@@ -137,7 +130,7 @@ static void TestBlock(HGraph* graph,
}
}
-TEST(FindLoopsTest, Loop1) {
+TEST_F(FindLoopsTest, Loop1) {
// Simple loop with one preheader and one back edge.
// var a = 0;
// while (a == a) {
@@ -151,7 +144,7 @@ TEST(FindLoopsTest, Loop1) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -162,7 +155,7 @@ TEST(FindLoopsTest, Loop1) {
TestBlock(graph, 5, false, kInvalidBlockId); // exit block
}
-TEST(FindLoopsTest, Loop2) {
+TEST_F(FindLoopsTest, Loop2) {
// Make sure we support a preheader of a loop not being the first predecessor
// in the predecessor list of the header.
// var a = 0;
@@ -179,7 +172,7 @@ TEST(FindLoopsTest, Loop2) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // goto block
@@ -191,7 +184,7 @@ TEST(FindLoopsTest, Loop2) {
TestBlock(graph, 6, false, kInvalidBlockId); // exit block
}
-TEST(FindLoopsTest, Loop3) {
+TEST_F(FindLoopsTest, Loop3) {
// Make sure we create a preheader of a loop when a header originally has two
// incoming blocks and one back edge.
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
@@ -204,7 +197,7 @@ TEST(FindLoopsTest, Loop3) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // goto block
@@ -218,7 +211,7 @@ TEST(FindLoopsTest, Loop3) {
TestBlock(graph, 8, false, kInvalidBlockId); // synthesized pre header
}
-TEST(FindLoopsTest, Loop4) {
+TEST_F(FindLoopsTest, Loop4) {
// Test loop with originally two back edges.
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
@@ -230,7 +223,7 @@ TEST(FindLoopsTest, Loop4) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -244,7 +237,7 @@ TEST(FindLoopsTest, Loop4) {
}
-TEST(FindLoopsTest, Loop5) {
+TEST_F(FindLoopsTest, Loop5) {
// Test loop with two exit edges.
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
@@ -256,7 +249,7 @@ TEST(FindLoopsTest, Loop5) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header
@@ -270,7 +263,7 @@ TEST(FindLoopsTest, Loop5) {
TestBlock(graph, 8, false, kInvalidBlockId); // synthesized block at the loop exit
}
-TEST(FindLoopsTest, InnerLoop) {
+TEST_F(FindLoopsTest, InnerLoop) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
@@ -281,7 +274,7 @@ TEST(FindLoopsTest, InnerLoop) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of outer loop
@@ -301,7 +294,7 @@ TEST(FindLoopsTest, InnerLoop) {
*graph->GetBlocks()[3]->GetLoopInformation()));
}
-TEST(FindLoopsTest, TwoLoops) {
+TEST_F(FindLoopsTest, TwoLoops) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
@@ -312,7 +305,7 @@ TEST(FindLoopsTest, TwoLoops) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
@@ -331,7 +324,7 @@ TEST(FindLoopsTest, TwoLoops) {
*graph->GetBlocks()[4]->GetLoopInformation()));
}
-TEST(FindLoopsTest, NonNaturalLoop) {
+TEST_F(FindLoopsTest, NonNaturalLoop) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
@@ -342,14 +335,14 @@ TEST(FindLoopsTest, NonNaturalLoop) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
ASSERT_TRUE(graph->GetBlocks()[3]->IsLoopHeader());
HLoopInformation* info = graph->GetBlocks()[3]->GetLoopInformation();
ASSERT_EQ(1u, info->NumberOfBackEdges());
ASSERT_FALSE(info->GetHeader()->Dominates(info->GetBackEdges()[0]));
}
-TEST(FindLoopsTest, DoWhileLoop) {
+TEST_F(FindLoopsTest, DoWhileLoop) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x0100,
@@ -358,7 +351,7 @@ TEST(FindLoopsTest, DoWhileLoop) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
- HGraph* graph = TestCode(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
TestBlock(graph, 0, false, kInvalidBlockId); // entry block
TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 962e77dfc9..e6e9177841 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -149,6 +149,103 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
}
current->Accept(this);
}
+
+ // Ensure that catch blocks are not normal successors, and normal blocks are
+ // never exceptional successors.
+ for (HBasicBlock* successor : block->GetNormalSuccessors()) {
+ if (successor->IsCatchBlock()) {
+ AddError(StringPrintf("Catch block %d is a normal successor of block %d.",
+ successor->GetBlockId(),
+ block->GetBlockId()));
+ }
+ }
+ for (HBasicBlock* successor : block->GetExceptionalSuccessors()) {
+ if (!successor->IsCatchBlock()) {
+ AddError(StringPrintf("Normal block %d is an exceptional successor of block %d.",
+ successor->GetBlockId(),
+ block->GetBlockId()));
+ }
+ }
+
+ // Ensure dominated blocks have `block` as the dominator.
+ for (HBasicBlock* dominated : block->GetDominatedBlocks()) {
+ if (dominated->GetDominator() != block) {
+ AddError(StringPrintf("Block %d should be the dominator of %d.",
+ block->GetBlockId(),
+ dominated->GetBlockId()));
+ }
+ }
+
+ // Ensure there is no critical edge (i.e., an edge connecting a
+ // block with multiple successors to a block with multiple
+ // predecessors). Exceptional edges are synthesized and hence
+ // not accounted for.
+ if (block->GetSuccessors().size() > 1) {
+ for (HBasicBlock* successor : block->GetNormalSuccessors()) {
+ if (successor->IsExitBlock() &&
+ block->IsSingleTryBoundary() &&
+ block->GetPredecessors().size() == 1u &&
+ block->GetSinglePredecessor()->GetLastInstruction()->IsThrow()) {
+ // Allowed critical edge Throw->TryBoundary->Exit.
+ } else if (successor->GetPredecessors().size() > 1) {
+ AddError(StringPrintf("Critical edge between blocks %d and %d.",
+ block->GetBlockId(),
+ successor->GetBlockId()));
+ }
+ }
+ }
+
+ // Ensure try membership information is consistent.
+ if (block->IsCatchBlock()) {
+ if (block->IsTryBlock()) {
+ const HTryBoundary& try_entry = block->GetTryCatchInformation()->GetTryEntry();
+ AddError(StringPrintf("Catch blocks should not be try blocks but catch block %d "
+ "has try entry %s:%d.",
+ block->GetBlockId(),
+ try_entry.DebugName(),
+ try_entry.GetId()));
+ }
+
+ if (block->IsLoopHeader()) {
+ AddError(StringPrintf("Catch blocks should not be loop headers but catch block %d is.",
+ block->GetBlockId()));
+ }
+ } else {
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ const HTryBoundary* incoming_try_entry = predecessor->ComputeTryEntryOfSuccessors();
+ if (block->IsTryBlock()) {
+ const HTryBoundary& stored_try_entry = block->GetTryCatchInformation()->GetTryEntry();
+ if (incoming_try_entry == nullptr) {
+ AddError(StringPrintf("Block %d has try entry %s:%d but no try entry follows "
+ "from predecessor %d.",
+ block->GetBlockId(),
+ stored_try_entry.DebugName(),
+ stored_try_entry.GetId(),
+ predecessor->GetBlockId()));
+ } else if (!incoming_try_entry->HasSameExceptionHandlersAs(stored_try_entry)) {
+ AddError(StringPrintf("Block %d has try entry %s:%d which is not consistent "
+ "with %s:%d that follows from predecessor %d.",
+ block->GetBlockId(),
+ stored_try_entry.DebugName(),
+ stored_try_entry.GetId(),
+ incoming_try_entry->DebugName(),
+ incoming_try_entry->GetId(),
+ predecessor->GetBlockId()));
+ }
+ } else if (incoming_try_entry != nullptr) {
+ AddError(StringPrintf("Block %d is not a try block but try entry %s:%d follows "
+ "from predecessor %d.",
+ block->GetBlockId(),
+ incoming_try_entry->DebugName(),
+ incoming_try_entry->GetId(),
+ predecessor->GetBlockId()));
+ }
+ }
+ }
+
+ if (block->IsLoopHeader()) {
+ HandleLoop(block);
+ }
}
void GraphChecker::VisitBoundsCheck(HBoundsCheck* check) {
@@ -168,7 +265,7 @@ void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) {
// Ensure that all exception handlers are catch blocks.
// Note that a normal-flow successor may be a catch block before CFG
- // simplification. We only test normal-flow successors in SsaChecker.
+ // simplification. We only test normal-flow successors in GraphChecker.
for (HBasicBlock* handler : handlers) {
if (!handler->IsCatchBlock()) {
AddError(StringPrintf("Block %d with %s:%d has exceptional successor %d which "
@@ -303,6 +400,88 @@ void GraphChecker::VisitInstruction(HInstruction* instruction) {
input->GetId()));
}
}
+
+ // Ensure an instruction dominates all its uses.
+ for (HUseIterator<HInstruction*> use_it(instruction->GetUses());
+ !use_it.Done(); use_it.Advance()) {
+ HInstruction* use = use_it.Current()->GetUser();
+ if (!use->IsPhi() && !instruction->StrictlyDominates(use)) {
+ AddError(StringPrintf("Instruction %s:%d in block %d does not dominate "
+ "use %s:%d in block %d.",
+ instruction->DebugName(),
+ instruction->GetId(),
+ current_block_->GetBlockId(),
+ use->DebugName(),
+ use->GetId(),
+ use->GetBlock()->GetBlockId()));
+ }
+ }
+
+ if (instruction->NeedsEnvironment() && !instruction->HasEnvironment()) {
+ AddError(StringPrintf("Instruction %s:%d in block %d requires an environment "
+ "but does not have one.",
+ instruction->DebugName(),
+ instruction->GetId(),
+ current_block_->GetBlockId()));
+ }
+
+ // Ensure an instruction having an environment is dominated by the
+ // instructions contained in the environment.
+ for (HEnvironment* environment = instruction->GetEnvironment();
+ environment != nullptr;
+ environment = environment->GetParent()) {
+ for (size_t i = 0, e = environment->Size(); i < e; ++i) {
+ HInstruction* env_instruction = environment->GetInstructionAt(i);
+ if (env_instruction != nullptr
+ && !env_instruction->StrictlyDominates(instruction)) {
+ AddError(StringPrintf("Instruction %d in environment of instruction %d "
+ "from block %d does not dominate instruction %d.",
+ env_instruction->GetId(),
+ instruction->GetId(),
+ current_block_->GetBlockId(),
+ instruction->GetId()));
+ }
+ }
+ }
+
+ // Ensure that reference type instructions have reference type info.
+ if (instruction->GetType() == Primitive::kPrimNot) {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!instruction->GetReferenceTypeInfo().IsValid()) {
+ AddError(StringPrintf("Reference type instruction %s:%d does not have "
+ "valid reference type information.",
+ instruction->DebugName(),
+ instruction->GetId()));
+ }
+ }
+
+ if (instruction->CanThrowIntoCatchBlock()) {
+ // Find the top-level environment. This corresponds to the environment of
+ // the catch block since we do not inline methods with try/catch.
+ HEnvironment* environment = instruction->GetEnvironment();
+ while (environment->GetParent() != nullptr) {
+ environment = environment->GetParent();
+ }
+
+ // Find all catch blocks and test that `instruction` has an environment
+ // value for each one.
+ const HTryBoundary& entry = instruction->GetBlock()->GetTryCatchInformation()->GetTryEntry();
+ for (HBasicBlock* catch_block : entry.GetExceptionHandlers()) {
+ for (HInstructionIterator phi_it(catch_block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
+ HPhi* catch_phi = phi_it.Current()->AsPhi();
+ if (environment->GetInstructionAt(catch_phi->GetRegNumber()) == nullptr) {
+ AddError(StringPrintf("Instruction %s:%d throws into catch block %d "
+ "with catch phi %d for vreg %d but its "
+ "corresponding environment slot is empty.",
+ instruction->DebugName(),
+ instruction->GetId(),
+ catch_block->GetBlockId(),
+ catch_phi->GetId(),
+ catch_phi->GetRegNumber()));
+ }
+ }
+ }
+ }
}
void GraphChecker::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
@@ -371,108 +550,7 @@ void GraphChecker::VisitInstanceOf(HInstanceOf* instruction) {
}
}
-void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
- super_type::VisitBasicBlock(block);
-
- // Ensure that catch blocks are not normal successors, and normal blocks are
- // never exceptional successors.
- for (HBasicBlock* successor : block->GetNormalSuccessors()) {
- if (successor->IsCatchBlock()) {
- AddError(StringPrintf("Catch block %d is a normal successor of block %d.",
- successor->GetBlockId(),
- block->GetBlockId()));
- }
- }
- for (HBasicBlock* successor : block->GetExceptionalSuccessors()) {
- if (!successor->IsCatchBlock()) {
- AddError(StringPrintf("Normal block %d is an exceptional successor of block %d.",
- successor->GetBlockId(),
- block->GetBlockId()));
- }
- }
-
- // Ensure dominated blocks have `block` as the dominator.
- for (HBasicBlock* dominated : block->GetDominatedBlocks()) {
- if (dominated->GetDominator() != block) {
- AddError(StringPrintf("Block %d should be the dominator of %d.",
- block->GetBlockId(),
- dominated->GetBlockId()));
- }
- }
-
- // Ensure there is no critical edge (i.e., an edge connecting a
- // block with multiple successors to a block with multiple
- // predecessors). Exceptional edges are synthesized and hence
- // not accounted for.
- if (block->GetSuccessors().size() > 1) {
- for (HBasicBlock* successor : block->GetNormalSuccessors()) {
- if (successor->IsExitBlock() &&
- block->IsSingleTryBoundary() &&
- block->GetPredecessors().size() == 1u &&
- block->GetSinglePredecessor()->GetLastInstruction()->IsThrow()) {
- // Allowed critical edge Throw->TryBoundary->Exit.
- } else if (successor->GetPredecessors().size() > 1) {
- AddError(StringPrintf("Critical edge between blocks %d and %d.",
- block->GetBlockId(),
- successor->GetBlockId()));
- }
- }
- }
-
- // Ensure try membership information is consistent.
- if (block->IsCatchBlock()) {
- if (block->IsTryBlock()) {
- const HTryBoundary& try_entry = block->GetTryCatchInformation()->GetTryEntry();
- AddError(StringPrintf("Catch blocks should not be try blocks but catch block %d "
- "has try entry %s:%d.",
- block->GetBlockId(),
- try_entry.DebugName(),
- try_entry.GetId()));
- }
-
- if (block->IsLoopHeader()) {
- AddError(StringPrintf("Catch blocks should not be loop headers but catch block %d is.",
- block->GetBlockId()));
- }
- } else {
- for (HBasicBlock* predecessor : block->GetPredecessors()) {
- const HTryBoundary* incoming_try_entry = predecessor->ComputeTryEntryOfSuccessors();
- if (block->IsTryBlock()) {
- const HTryBoundary& stored_try_entry = block->GetTryCatchInformation()->GetTryEntry();
- if (incoming_try_entry == nullptr) {
- AddError(StringPrintf("Block %d has try entry %s:%d but no try entry follows "
- "from predecessor %d.",
- block->GetBlockId(),
- stored_try_entry.DebugName(),
- stored_try_entry.GetId(),
- predecessor->GetBlockId()));
- } else if (!incoming_try_entry->HasSameExceptionHandlersAs(stored_try_entry)) {
- AddError(StringPrintf("Block %d has try entry %s:%d which is not consistent "
- "with %s:%d that follows from predecessor %d.",
- block->GetBlockId(),
- stored_try_entry.DebugName(),
- stored_try_entry.GetId(),
- incoming_try_entry->DebugName(),
- incoming_try_entry->GetId(),
- predecessor->GetBlockId()));
- }
- } else if (incoming_try_entry != nullptr) {
- AddError(StringPrintf("Block %d is not a try block but try entry %s:%d follows "
- "from predecessor %d.",
- block->GetBlockId(),
- incoming_try_entry->DebugName(),
- incoming_try_entry->GetId(),
- predecessor->GetBlockId()));
- }
- }
- }
-
- if (block->IsLoopHeader()) {
- CheckLoop(block);
- }
-}
-
-void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
+void GraphChecker::HandleLoop(HBasicBlock* loop_header) {
int id = loop_header->GetBlockId();
HLoopInformation* loop_information = loop_header->GetLoopInformation();
@@ -582,92 +660,6 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
}
}
-void SSAChecker::VisitInstruction(HInstruction* instruction) {
- super_type::VisitInstruction(instruction);
-
- // Ensure an instruction dominates all its uses.
- for (HUseIterator<HInstruction*> use_it(instruction->GetUses());
- !use_it.Done(); use_it.Advance()) {
- HInstruction* use = use_it.Current()->GetUser();
- if (!use->IsPhi() && !instruction->StrictlyDominates(use)) {
- AddError(StringPrintf("Instruction %s:%d in block %d does not dominate "
- "use %s:%d in block %d.",
- instruction->DebugName(),
- instruction->GetId(),
- current_block_->GetBlockId(),
- use->DebugName(),
- use->GetId(),
- use->GetBlock()->GetBlockId()));
- }
- }
-
- if (instruction->NeedsEnvironment() && !instruction->HasEnvironment()) {
- AddError(StringPrintf("Instruction %s:%d in block %d requires an environment "
- "but does not have one.",
- instruction->DebugName(),
- instruction->GetId(),
- current_block_->GetBlockId()));
- }
-
- // Ensure an instruction having an environment is dominated by the
- // instructions contained in the environment.
- for (HEnvironment* environment = instruction->GetEnvironment();
- environment != nullptr;
- environment = environment->GetParent()) {
- for (size_t i = 0, e = environment->Size(); i < e; ++i) {
- HInstruction* env_instruction = environment->GetInstructionAt(i);
- if (env_instruction != nullptr
- && !env_instruction->StrictlyDominates(instruction)) {
- AddError(StringPrintf("Instruction %d in environment of instruction %d "
- "from block %d does not dominate instruction %d.",
- env_instruction->GetId(),
- instruction->GetId(),
- current_block_->GetBlockId(),
- instruction->GetId()));
- }
- }
- }
-
- // Ensure that reference type instructions have reference type info.
- if (instruction->GetType() == Primitive::kPrimNot) {
- ScopedObjectAccess soa(Thread::Current());
- if (!instruction->GetReferenceTypeInfo().IsValid()) {
- AddError(StringPrintf("Reference type instruction %s:%d does not have "
- "valid reference type information.",
- instruction->DebugName(),
- instruction->GetId()));
- }
- }
-
- if (instruction->CanThrowIntoCatchBlock()) {
- // Find the top-level environment. This corresponds to the environment of
- // the catch block since we do not inline methods with try/catch.
- HEnvironment* environment = instruction->GetEnvironment();
- while (environment->GetParent() != nullptr) {
- environment = environment->GetParent();
- }
-
- // Find all catch blocks and test that `instruction` has an environment
- // value for each one.
- const HTryBoundary& entry = instruction->GetBlock()->GetTryCatchInformation()->GetTryEntry();
- for (HBasicBlock* catch_block : entry.GetExceptionHandlers()) {
- for (HInstructionIterator phi_it(catch_block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
- HPhi* catch_phi = phi_it.Current()->AsPhi();
- if (environment->GetInstructionAt(catch_phi->GetRegNumber()) == nullptr) {
- AddError(StringPrintf("Instruction %s:%d throws into catch block %d "
- "with catch phi %d for vreg %d but its "
- "corresponding environment slot is empty.",
- instruction->DebugName(),
- instruction->GetId(),
- catch_block->GetBlockId(),
- catch_phi->GetId(),
- catch_phi->GetRegNumber()));
- }
- }
- }
- }
-}
-
static Primitive::Type PrimitiveKind(Primitive::Type type) {
switch (type) {
case Primitive::kPrimBoolean:
@@ -710,7 +702,7 @@ static bool IsConstantEquivalent(HInstruction* insn1, HInstruction* insn2, BitVe
}
}
-void SSAChecker::VisitPhi(HPhi* phi) {
+void GraphChecker::VisitPhi(HPhi* phi) {
VisitInstruction(phi);
// Ensure the first input of a phi is not itself.
@@ -846,7 +838,7 @@ void SSAChecker::VisitPhi(HPhi* phi) {
}
}
-void SSAChecker::HandleBooleanInput(HInstruction* instruction, size_t input_index) {
+void GraphChecker::HandleBooleanInput(HInstruction* instruction, size_t input_index) {
HInstruction* input = instruction->InputAt(input_index);
if (input->IsIntConstant()) {
int32_t value = input->AsIntConstant()->GetValue();
@@ -876,7 +868,7 @@ void SSAChecker::HandleBooleanInput(HInstruction* instruction, size_t input_inde
}
}
-void SSAChecker::VisitPackedSwitch(HPackedSwitch* instruction) {
+void GraphChecker::VisitPackedSwitch(HPackedSwitch* instruction) {
VisitInstruction(instruction);
// Check that the number of block successors matches the switch count plus
// one for the default block.
@@ -892,22 +884,22 @@ void SSAChecker::VisitPackedSwitch(HPackedSwitch* instruction) {
}
}
-void SSAChecker::VisitIf(HIf* instruction) {
+void GraphChecker::VisitIf(HIf* instruction) {
VisitInstruction(instruction);
HandleBooleanInput(instruction, 0);
}
-void SSAChecker::VisitSelect(HSelect* instruction) {
+void GraphChecker::VisitSelect(HSelect* instruction) {
VisitInstruction(instruction);
HandleBooleanInput(instruction, 2);
}
-void SSAChecker::VisitBooleanNot(HBooleanNot* instruction) {
+void GraphChecker::VisitBooleanNot(HBooleanNot* instruction) {
VisitInstruction(instruction);
HandleBooleanInput(instruction, 0);
}
-void SSAChecker::VisitCondition(HCondition* op) {
+void GraphChecker::VisitCondition(HCondition* op) {
VisitInstruction(op);
if (op->GetType() != Primitive::kPrimBoolean) {
AddError(StringPrintf(
@@ -937,7 +929,7 @@ void SSAChecker::VisitCondition(HCondition* op) {
}
}
-void SSAChecker::VisitBinaryOperation(HBinaryOperation* op) {
+void GraphChecker::VisitBinaryOperation(HBinaryOperation* op) {
VisitInstruction(op);
if (op->IsUShr() || op->IsShr() || op->IsShl() || op->IsRor()) {
if (PrimitiveKind(op->InputAt(1)->GetType()) != Primitive::kPrimInt) {
@@ -979,7 +971,7 @@ void SSAChecker::VisitBinaryOperation(HBinaryOperation* op) {
}
}
-void SSAChecker::VisitConstant(HConstant* instruction) {
+void GraphChecker::VisitConstant(HConstant* instruction) {
HBasicBlock* block = instruction->GetBlock();
if (!block->IsEntryBlock()) {
AddError(StringPrintf(
@@ -990,7 +982,7 @@ void SSAChecker::VisitConstant(HConstant* instruction) {
}
}
-void SSAChecker::VisitBoundType(HBoundType* instruction) {
+void GraphChecker::VisitBoundType(HBoundType* instruction) {
VisitInstruction(instruction);
ScopedObjectAccess soa(Thread::Current());
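For reference (not part of the patch): among the checks that moved here from SSAChecker, the environment check now applies to every graph the checker sees, including hand-built test graphs; this is what forced the HDeoptimize to HSelect substitution in constant_folding_test.cc earlier in this patch. A minimal sketch of the kind of graph that now fails validation:

  // Hypothetical snippet: NeedsEnvironment() is true for HDeoptimize, so adding it to a
  // hand-built block without attaching an HEnvironment makes GraphChecker report
  // "Instruction Deoptimize:<id> in block <n> requires an environment but does not have one."
  HInstruction* deopt = new (&allocator) HDeoptimize(condition, /* dex_pc */ 0);
  block->AddInstruction(deopt);
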
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 8724cde5dd..52252cd3d4 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -32,34 +32,38 @@ class GraphChecker : public HGraphDelegateVisitor {
dump_prefix_(dump_prefix),
seen_ids_(graph->GetArena(), graph->GetCurrentInstructionId(), false) {}
- // Check the whole graph (in insertion order).
- virtual void Run() { VisitInsertionOrder(); }
+ // Check the whole graph (in reverse post-order).
+ void Run() {
+ // VisitReversePostOrder is used instead of VisitInsertionOrder,
+ // as the latter might visit dead blocks removed by the dominator
+ // computation.
+ VisitReversePostOrder();
+ }
- // Check `block`.
void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
- // Check `instruction`.
void VisitInstruction(HInstruction* instruction) OVERRIDE;
+ void VisitPhi(HPhi* phi) OVERRIDE;
- // Perform control-flow graph checks on instruction.
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
-
- // Check that the HasBoundsChecks() flag is set for bounds checks.
+ void VisitBinaryOperation(HBinaryOperation* op) OVERRIDE;
+ void VisitBooleanNot(HBooleanNot* instruction) OVERRIDE;
+ void VisitBoundType(HBoundType* instruction) OVERRIDE;
void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
-
- // Check successors of blocks ending in TryBoundary.
- void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE;
-
- // Check that LoadException is the first instruction in a catch block.
- void VisitLoadException(HLoadException* load) OVERRIDE;
-
- // Check that HCheckCast and HInstanceOf have HLoadClass as second input.
void VisitCheckCast(HCheckCast* check) OVERRIDE;
+ void VisitCondition(HCondition* op) OVERRIDE;
+ void VisitConstant(HConstant* instruction) OVERRIDE;
+ void VisitIf(HIf* instruction) OVERRIDE;
void VisitInstanceOf(HInstanceOf* check) OVERRIDE;
-
- // Check that the Return and ReturnVoid jump to the exit block.
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
+ void VisitLoadException(HLoadException* load) OVERRIDE;
+ void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE;
void VisitReturn(HReturn* ret) OVERRIDE;
void VisitReturnVoid(HReturnVoid* ret) OVERRIDE;
+ void VisitSelect(HSelect* instruction) OVERRIDE;
+ void VisitTryBoundary(HTryBoundary* try_boundary) OVERRIDE;
+
+ void HandleLoop(HBasicBlock* loop_header);
+ void HandleBooleanInput(HInstruction* instruction, size_t input_index);
// Was the last visit of the graph valid?
bool IsValid() const {
@@ -97,46 +101,6 @@ class GraphChecker : public HGraphDelegateVisitor {
DISALLOW_COPY_AND_ASSIGN(GraphChecker);
};
-
-// An SSA graph visitor performing various checks.
-class SSAChecker : public GraphChecker {
- public:
- typedef GraphChecker super_type;
-
- explicit SSAChecker(HGraph* graph)
- : GraphChecker(graph, "art::SSAChecker: ") {}
-
- // Check the whole graph (in reverse post-order).
- void Run() OVERRIDE {
- // VisitReversePostOrder is used instead of VisitInsertionOrder,
- // as the latter might visit dead blocks removed by the dominator
- // computation.
- VisitReversePostOrder();
- }
-
- // Perform SSA form checks on `block`.
- void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
- // Loop-related checks from block `loop_header`.
- void CheckLoop(HBasicBlock* loop_header);
-
- // Perform SSA form checks on instructions.
- void VisitInstruction(HInstruction* instruction) OVERRIDE;
- void VisitPhi(HPhi* phi) OVERRIDE;
- void VisitBinaryOperation(HBinaryOperation* op) OVERRIDE;
- void VisitCondition(HCondition* op) OVERRIDE;
- void VisitIf(HIf* instruction) OVERRIDE;
- void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE;
- void VisitSelect(HSelect* instruction) OVERRIDE;
- void VisitBooleanNot(HBooleanNot* instruction) OVERRIDE;
- void VisitConstant(HConstant* instruction) OVERRIDE;
- void VisitBoundType(HBoundType* instruction) OVERRIDE;
-
- void HandleBooleanInput(HInstruction* instruction, size_t input_index);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SSAChecker);
-};
-
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_GRAPH_CHECKER_H_
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index d10df4ce3f..2b8231942b 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -38,6 +38,7 @@ HGraph* CreateSimpleCFG(ArenaAllocator* allocator) {
graph->AddBlock(exit_block);
graph->SetExitBlock(exit_block);
entry_block->AddSuccessor(exit_block);
+ graph->BuildDominatorTree();
return graph;
}
@@ -52,28 +53,16 @@ static void TestCode(const uint16_t* data) {
ASSERT_TRUE(graph_checker.IsValid());
}
-static void TestCodeSSA(const uint16_t* data) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateCFG(&allocator, data);
- ASSERT_NE(graph, nullptr);
-
- TransformToSsa(graph);
+class GraphCheckerTest : public CommonCompilerTest {};
- SSAChecker ssa_checker(graph);
- ssa_checker.Run();
- ASSERT_TRUE(ssa_checker.IsValid());
-}
-
-
-TEST(GraphChecker, ReturnVoid) {
+TEST_F(GraphCheckerTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
TestCode(data);
}
-TEST(GraphChecker, CFG1) {
+TEST_F(GraphCheckerTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -81,7 +70,7 @@ TEST(GraphChecker, CFG1) {
TestCode(data);
}
-TEST(GraphChecker, CFG2) {
+TEST_F(GraphCheckerTest, CFG2) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
@@ -91,7 +80,7 @@ TEST(GraphChecker, CFG2) {
TestCode(data);
}
-TEST(GraphChecker, CFG3) {
+TEST_F(GraphCheckerTest, CFG3) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
@@ -103,7 +92,7 @@ TEST(GraphChecker, CFG3) {
// Test case with an invalid graph containing inconsistent
// predecessor/successor arcs in CFG.
-TEST(GraphChecker, InconsistentPredecessorsAndSuccessors) {
+TEST_F(GraphCheckerTest, InconsistentPredecessorsAndSuccessors) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
@@ -121,7 +110,7 @@ TEST(GraphChecker, InconsistentPredecessorsAndSuccessors) {
// Test case with an invalid graph containing a non-branch last
// instruction in a block.
-TEST(GraphChecker, BlockEndingWithNonBranchInstruction) {
+TEST_F(GraphCheckerTest, BlockEndingWithNonBranchInstruction) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
@@ -141,9 +130,7 @@ TEST(GraphChecker, BlockEndingWithNonBranchInstruction) {
ASSERT_FALSE(graph_checker.IsValid());
}
-class SSACheckerTest : public CommonCompilerTest {};
-
-TEST_F(SSACheckerTest, SSAPhi) {
+TEST_F(GraphCheckerTest, SSAPhi) {
// This code creates one Phi function during the conversion to SSA form.
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
@@ -151,7 +138,7 @@ TEST_F(SSACheckerTest, SSAPhi) {
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::RETURN | 0 << 8);
- TestCodeSSA(data);
+ TestCode(data);
}
} // namespace art
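For reference (not part of the patch): the hunk above adds BuildDominatorTree() to the hand-built graph because the merged checker always verifies dominator and loop information. A sketch of the complete helper as it presumably looks after the change; the two AddInstruction calls are not visible in the hunk and are assumed:

  HGraph* CreateSimpleCFGSketch(ArenaAllocator* allocator) {  // hypothetical name
    HGraph* graph = CreateGraph(allocator);
    HBasicBlock* entry_block = new (allocator) HBasicBlock(graph);
    entry_block->AddInstruction(new (allocator) HReturnVoid());  // assumed
    graph->AddBlock(entry_block);
    graph->SetEntryBlock(entry_block);
    HBasicBlock* exit_block = new (allocator) HBasicBlock(graph);
    exit_block->AddInstruction(new (allocator) HExit());  // assumed
    graph->AddBlock(exit_block);
    graph->SetExitBlock(exit_block);
    entry_block->AddSuccessor(exit_block);
    graph->BuildDominatorTree();  // new: required for the dominator/loop checks
    return graph;
  }
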
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 9d796c1004..4cf0eb1565 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -22,6 +22,7 @@
#include <sstream>
#include "bounds_check_elimination.h"
+#include "builder.h"
#include "code_generator.h"
#include "dead_code_elimination.h"
#include "disassembler.h"
@@ -31,7 +32,6 @@
#include "optimization.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
-#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "utils/assembler.h"
@@ -368,11 +368,13 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
}
void VisitCheckCast(HCheckCast* check_cast) OVERRIDE {
+ StartAttributeStream("check_kind") << check_cast->GetTypeCheckKind();
StartAttributeStream("must_do_null_check") << std::boolalpha
<< check_cast->MustDoNullCheck() << std::noboolalpha;
}
void VisitInstanceOf(HInstanceOf* instance_of) OVERRIDE {
+ StartAttributeStream("check_kind") << instance_of->GetTypeCheckKind();
StartAttributeStream("must_do_null_check") << std::boolalpha
<< instance_of->MustDoNullCheck() << std::noboolalpha;
}
@@ -508,7 +510,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
|| IsPass(HDeadCodeElimination::kInitialDeadCodeEliminationPassName)
|| IsPass(BoundsCheckElimination::kBoundsCheckEliminationPassName)
|| IsPass(RegisterAllocator::kRegisterAllocatorPassName)
- || IsPass(SsaBuilder::kSsaBuilderPassName)) {
+ || IsPass(HGraphBuilder::kBuilderPassName)) {
HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
if (info == nullptr) {
StartAttributeStream("loop") << "none";
@@ -525,7 +527,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
}
}
- if ((IsPass(SsaBuilder::kSsaBuilderPassName)
+ if ((IsPass(HGraphBuilder::kBuilderPassName)
|| IsPass(HInliner::kInlinerPassName))
&& (instruction->GetType() == Primitive::kPrimNot)) {
ReferenceTypeInfo info = instruction->IsLoadClass()
@@ -545,7 +547,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
// doesn't run or doesn't inline anything, the NullConstant remains untyped.
// So we should check NullConstants for validity only after reference type propagation.
DCHECK(graph_in_bad_state_ ||
- (!is_after_pass_ && IsPass(SsaBuilder::kSsaBuilderPassName)))
+ (!is_after_pass_ && IsPass(HGraphBuilder::kBuilderPassName)))
<< instruction->DebugName() << instruction->GetId() << " has invalid rti "
<< (is_after_pass_ ? "after" : "before") << " pass " << pass_name_;
}
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 1f4eaf3cfd..56dc08826b 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -100,7 +100,7 @@ TEST_F(GVNTest, LocalFieldElimination) {
ASSERT_EQ(different_offset->GetBlock(), block);
ASSERT_EQ(use_after_kill->GetBlock(), block);
- TransformToSsa(graph);
+ graph->BuildDominatorTree();
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
GVNOptimization(graph, side_effects).Run();
@@ -182,7 +182,7 @@ TEST_F(GVNTest, GlobalFieldElimination) {
0));
join->AddInstruction(new (&allocator) HExit());
- TransformToSsa(graph);
+ graph->BuildDominatorTree();
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
GVNOptimization(graph, side_effects).Run();
@@ -288,7 +288,7 @@ TEST_F(GVNTest, LoopFieldElimination) {
ASSERT_EQ(field_get_in_loop_body->GetBlock(), loop_body);
ASSERT_EQ(field_get_in_exit->GetBlock(), exit);
- TransformToSsa(graph);
+ graph->BuildDominatorTree();
{
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
@@ -364,7 +364,7 @@ TEST_F(GVNTest, LoopSideEffects) {
inner_loop_exit->AddInstruction(new (&allocator) HGoto());
outer_loop_exit->AddInstruction(new (&allocator) HExit());
- TransformToSsa(graph);
+ graph->BuildDominatorTree();
ASSERT_TRUE(inner_loop_header->GetLoopInformation()->IsIn(
*outer_loop_header->GetLoopInformation()));
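For reference (not part of the patch): the GVN tests construct their graphs by hand, and those graphs are already in SSA form, so only the dominator tree has to be built before the analyses run; TransformToSsa is gone. The per-test pipeline, mirroring the hunks above:

  graph->BuildDominatorTree();             // replaces the old TransformToSsa(graph)
  SideEffectsAnalysis side_effects(graph);
  side_effects.Run();
  GVNOptimization(graph, side_effects).Run();
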
diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc
index 29a1845658..89e4690de2 100644
--- a/compiler/optimizing/induction_var_analysis_test.cc
+++ b/compiler/optimizing/induction_var_analysis_test.cc
@@ -86,39 +86,28 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
constant1_ = graph_->GetIntConstant(1);
constant100_ = graph_->GetIntConstant(100);
float_constant0_ = graph_->GetFloatConstant(0.0f);
- induc_ = new (&allocator_) HLocal(n);
- entry_->AddInstruction(induc_);
- entry_->AddInstruction(new (&allocator_) HStoreLocal(induc_, constant0_));
- tmp_ = new (&allocator_) HLocal(n + 1);
- entry_->AddInstruction(tmp_);
- entry_->AddInstruction(new (&allocator_) HStoreLocal(tmp_, constant100_));
- dum_ = new (&allocator_) HLocal(n + 2);
- entry_->AddInstruction(dum_);
return_->AddInstruction(new (&allocator_) HReturnVoid());
exit_->AddInstruction(new (&allocator_) HExit());
// Provide loop instructions.
for (int d = 0; d < n; d++) {
- basic_[d] = new (&allocator_) HLocal(d);
- entry_->AddInstruction(basic_[d]);
- loop_preheader_[d]->AddInstruction(new (&allocator_) HStoreLocal(basic_[d], constant0_));
+ basic_[d] = new (&allocator_) HPhi(&allocator_, d, 0, Primitive::kPrimInt);
loop_preheader_[d]->AddInstruction(new (&allocator_) HGoto());
- HInstruction* load = new (&allocator_) HLoadLocal(basic_[d], Primitive::kPrimInt);
- loop_header_[d]->AddInstruction(load);
- HInstruction* compare = new (&allocator_) HLessThan(load, constant100_);
+ loop_header_[d]->AddPhi(basic_[d]);
+ HInstruction* compare = new (&allocator_) HLessThan(basic_[d], constant100_);
loop_header_[d]->AddInstruction(compare);
loop_header_[d]->AddInstruction(new (&allocator_) HIf(compare));
- load = new (&allocator_) HLoadLocal(basic_[d], Primitive::kPrimInt);
- loop_body_[d]->AddInstruction(load);
- increment_[d] = new (&allocator_) HAdd(Primitive::kPrimInt, load, constant1_);
+ increment_[d] = new (&allocator_) HAdd(Primitive::kPrimInt, basic_[d], constant1_);
loop_body_[d]->AddInstruction(increment_[d]);
- loop_body_[d]->AddInstruction(new (&allocator_) HStoreLocal(basic_[d], increment_[d]));
loop_body_[d]->AddInstruction(new (&allocator_) HGoto());
+
+ basic_[d]->AddInput(constant0_);
+ basic_[d]->AddInput(increment_[d]);
}
}
// Builds if-statement at depth d.
- void BuildIf(int d, HBasicBlock** ifT, HBasicBlock **ifF) {
+ HPhi* BuildIf(int d, HBasicBlock** ifT, HBasicBlock **ifF) {
HBasicBlock* cond = new (&allocator_) HBasicBlock(graph_);
HBasicBlock* ifTrue = new (&allocator_) HBasicBlock(graph_);
HBasicBlock* ifFalse = new (&allocator_) HBasicBlock(graph_);
@@ -134,6 +123,10 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
cond->AddInstruction(new (&allocator_) HIf(parameter_));
*ifT = ifTrue;
*ifF = ifFalse;
+
+ HPhi* select_phi = new (&allocator_) HPhi(&allocator_, -1, 0, Primitive::kPrimInt);
+ loop_body_[d]->AddPhi(select_phi);
+ return select_phi;
}
// Inserts instruction right before increment at depth d.
@@ -142,25 +135,20 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
return instruction;
}
- // Inserts local load at depth d.
- HInstruction* InsertLocalLoad(HLocal* local, int d) {
- return InsertInstruction(new (&allocator_) HLoadLocal(local, Primitive::kPrimInt), d);
- }
-
- // Inserts local store at depth d.
- HInstruction* InsertLocalStore(HLocal* local, HInstruction* rhs, int d) {
- return InsertInstruction(new (&allocator_) HStoreLocal(local, rhs), d);
+ // Inserts a phi to loop header at depth d and returns it.
+ HPhi* InsertLoopPhi(int vreg, int d) {
+ HPhi* phi = new (&allocator_) HPhi(&allocator_, vreg, 0, Primitive::kPrimInt);
+ loop_header_[d]->AddPhi(phi);
+ return phi;
}
- // Inserts an array store with given local as subscript at depth d to
+ // Inserts an array store with given `subscript` at depth d to
// enable tests to inspect the computed induction at that point easily.
- HInstruction* InsertArrayStore(HLocal* subscript, int d) {
- HInstruction* load = InsertInstruction(
- new (&allocator_) HLoadLocal(subscript, Primitive::kPrimInt), d);
+ HInstruction* InsertArrayStore(HInstruction* subscript, int d) {
// ArraySet is given a float value in order to avoid SsaBuilder typing
// it from the array's non-existent reference type info.
return InsertInstruction(new (&allocator_) HArraySet(
- parameter_, load, float_constant0_, Primitive::kPrimFloat, 0), d);
+ parameter_, subscript, float_constant0_, Primitive::kPrimFloat, 0), d);
}
// Returns induction information of instruction in loop at depth d.
@@ -171,7 +159,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
// Performs InductionVarAnalysis (after proper set up).
void PerformInductionVarAnalysis() {
- TransformToSsa(graph_);
+ graph_->BuildDominatorTree();
iva_ = new (&allocator_) HInductionVarAnalysis(graph_);
iva_->Run();
}
@@ -191,16 +179,13 @@ class InductionVarAnalysisTest : public CommonCompilerTest {
HInstruction* constant1_;
HInstruction* constant100_;
HInstruction* float_constant0_;
- HLocal* induc_; // "vreg_n", the "k"
- HLocal* tmp_; // "vreg_n+1"
- HLocal* dum_; // "vreg_n+2"
// Loop specifics.
HBasicBlock* loop_preheader_[10];
HBasicBlock* loop_header_[10];
HBasicBlock* loop_body_[10];
HInstruction* increment_[10];
- HLocal* basic_[10]; // "vreg_d", the "i_d"
+ HPhi* basic_[10]; // "vreg_d", the "i_d"
};
//
@@ -216,7 +201,7 @@ TEST_F(InductionVarAnalysisTest, ProperLoopSetup) {
// ..
// }
BuildLoopNest(10);
- TransformToSsa(graph_);
+ graph_->BuildDominatorTree();
ASSERT_EQ(entry_->GetLoopInformation(), nullptr);
for (int d = 0; d < 1; d++) {
ASSERT_EQ(loop_preheader_[d]->GetLoopInformation(),
@@ -258,20 +243,15 @@ TEST_F(InductionVarAnalysisTest, FindDerivedInduction) {
// }
BuildLoopNest(1);
HInstruction *add = InsertInstruction(
- new (&allocator_) HAdd(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
- InsertLocalStore(induc_, add, 0);
+ new (&allocator_) HAdd(Primitive::kPrimInt, constant100_, basic_[0]), 0);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
- InsertLocalStore(induc_, sub, 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0]), 0);
HInstruction *mul = InsertInstruction(
- new (&allocator_) HMul(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
- InsertLocalStore(induc_, mul, 0);
+ new (&allocator_) HMul(Primitive::kPrimInt, constant100_, basic_[0]), 0);
HInstruction *shl = InsertInstruction(
- new (&allocator_) HShl(Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0), constant1_), 0);
- InsertLocalStore(induc_, shl, 0);
+ new (&allocator_) HShl(Primitive::kPrimInt, basic_[0], constant1_), 0);
HInstruction *neg = InsertInstruction(
- new (&allocator_) HNeg(Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0)), 0);
- InsertLocalStore(induc_, neg, 0);
+ new (&allocator_) HNeg(Primitive::kPrimInt, basic_[0]), 0);
PerformInductionVarAnalysis();
EXPECT_STREQ("((1) * i + (100))", GetInductionInfo(add, 0).c_str());
@@ -291,14 +271,16 @@ TEST_F(InductionVarAnalysisTest, FindChainInduction) {
// a[k] = 0;
// }
BuildLoopNest(1);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant0_);
+
HInstruction *add = InsertInstruction(
- new (&allocator_) HAdd(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
- InsertLocalStore(induc_, add, 0);
- HInstruction* store1 = InsertArrayStore(induc_, 0);
+ new (&allocator_) HAdd(Primitive::kPrimInt, k, constant100_), 0);
+ HInstruction* store1 = InsertArrayStore(add, 0);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
- InsertLocalStore(induc_, sub, 0);
- HInstruction* store2 = InsertArrayStore(induc_, 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, add, constant1_), 0);
+ HInstruction* store2 = InsertArrayStore(sub, 0);
+ k->AddInput(sub);
PerformInductionVarAnalysis();
EXPECT_STREQ("(((100) - (1)) * i + (100))",
@@ -316,23 +298,24 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayBasicInduction) {
// a[k] = 0;
// }
BuildLoopNest(1);
+ HPhi* k_header = InsertLoopPhi(0, 0);
+ k_header->AddInput(constant0_);
+
HBasicBlock* ifTrue;
HBasicBlock* ifFalse;
- BuildIf(0, &ifTrue, &ifFalse);
+ HPhi* k_body = BuildIf(0, &ifTrue, &ifFalse);
+
// True-branch.
- HInstruction* load1 = new (&allocator_) HLoadLocal(induc_, Primitive::kPrimInt);
- ifTrue->AddInstruction(load1);
- HInstruction* inc1 = new (&allocator_) HAdd(Primitive::kPrimInt, load1, constant1_);
+ HInstruction* inc1 = new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant1_);
ifTrue->AddInstruction(inc1);
- ifTrue->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc1));
+ k_body->AddInput(inc1);
// False-branch.
- HInstruction* load2 = new (&allocator_) HLoadLocal(induc_, Primitive::kPrimInt);
- ifFalse->AddInstruction(load2);
- HInstruction* inc2 = new (&allocator_) HAdd(Primitive::kPrimInt, load2, constant1_);
+ HInstruction* inc2 = new (&allocator_) HAdd(Primitive::kPrimInt, k_header, constant1_);
ifFalse->AddInstruction(inc2);
- ifFalse->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc2));
+ k_body->AddInput(inc2);
// Merge over a phi.
- HInstruction* store = InsertArrayStore(induc_, 0);
+ HInstruction* store = InsertArrayStore(k_body, 0);
+ k_header->AddInput(k_body);
PerformInductionVarAnalysis();
EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
@@ -348,21 +331,18 @@ TEST_F(InductionVarAnalysisTest, FindTwoWayDerivedInduction) {
BuildLoopNest(1);
HBasicBlock* ifTrue;
HBasicBlock* ifFalse;
- BuildIf(0, &ifTrue, &ifFalse);
+ HPhi* k = BuildIf(0, &ifTrue, &ifFalse);
+
// True-branch.
- HInstruction* load1 = new (&allocator_) HLoadLocal(basic_[0], Primitive::kPrimInt);
- ifTrue->AddInstruction(load1);
- HInstruction* inc1 = new (&allocator_) HAdd(Primitive::kPrimInt, load1, constant1_);
+ HInstruction* inc1 = new (&allocator_) HAdd(Primitive::kPrimInt, basic_[0], constant1_);
ifTrue->AddInstruction(inc1);
- ifTrue->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc1));
+ k->AddInput(inc1);
// False-branch.
- HInstruction* load2 = new (&allocator_) HLoadLocal(basic_[0], Primitive::kPrimInt);
- ifFalse->AddInstruction(load2);
- HInstruction* inc2 = new (&allocator_) HAdd(Primitive::kPrimInt, load2, constant1_);
+ HInstruction* inc2 = new (&allocator_) HAdd(Primitive::kPrimInt, basic_[0], constant1_);
ifFalse->AddInstruction(inc2);
- ifFalse->AddInstruction(new (&allocator_) HStoreLocal(induc_, inc2));
+ k->AddInput(inc2);
// Merge over a phi.
- HInstruction* store = InsertArrayStore(induc_, 0);
+ HInstruction* store = InsertArrayStore(k, 0);
PerformInductionVarAnalysis();
EXPECT_STREQ("((1) * i + (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
@@ -376,10 +356,13 @@ TEST_F(InductionVarAnalysisTest, FindFirstOrderWrapAroundInduction) {
// k = 100 - i;
// }
BuildLoopNest(1);
- HInstruction* store = InsertArrayStore(induc_, 0);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant0_);
+
+ HInstruction* store = InsertArrayStore(k, 0);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
- InsertLocalStore(induc_, sub, 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0]), 0);
+ k->AddInput(sub);
PerformInductionVarAnalysis();
EXPECT_STREQ("wrap((0), (( - (1)) * i + (100)))",
@@ -396,11 +379,16 @@ TEST_F(InductionVarAnalysisTest, FindSecondOrderWrapAroundInduction) {
// t = 100 - i;
// }
BuildLoopNest(1);
- HInstruction* store = InsertArrayStore(induc_, 0);
- InsertLocalStore(induc_, InsertLocalLoad(tmp_, 0), 0);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant0_);
+ HPhi* t = InsertLoopPhi(1, 0);
+ t->AddInput(constant100_);
+
+ HInstruction* store = InsertArrayStore(k, 0);
+ k->AddInput(t);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(Primitive::kPrimInt, constant100_, InsertLocalLoad(basic_[0], 0)), 0);
- InsertLocalStore(tmp_, sub, 0);
+      new (&allocator_) HSub(Primitive::kPrimInt, constant100_, basic_[0]), 0);
+ t->AddInput(sub);
PerformInductionVarAnalysis();
EXPECT_STREQ("wrap((0), wrap((100), (( - (1)) * i + (100))))",
@@ -419,26 +407,21 @@ TEST_F(InductionVarAnalysisTest, FindWrapAroundDerivedInduction) {
// k = i << 1;
// }
BuildLoopNest(1);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant0_);
+
HInstruction *add = InsertInstruction(
- new (&allocator_) HAdd(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
- InsertLocalStore(tmp_, add, 0);
+ new (&allocator_) HAdd(Primitive::kPrimInt, k, constant100_), 0);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
- InsertLocalStore(tmp_, sub, 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, k, constant100_), 0);
HInstruction *mul = InsertInstruction(
- new (&allocator_) HMul(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
- InsertLocalStore(tmp_, mul, 0);
+ new (&allocator_) HMul(Primitive::kPrimInt, k, constant100_), 0);
HInstruction *shl = InsertInstruction(
- new (&allocator_) HShl(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
- InsertLocalStore(tmp_, shl, 0);
+ new (&allocator_) HShl(Primitive::kPrimInt, k, constant1_), 0);
HInstruction *neg = InsertInstruction(
- new (&allocator_) HNeg(Primitive::kPrimInt, InsertLocalLoad(induc_, 0)), 0);
- InsertLocalStore(tmp_, neg, 0);
- InsertLocalStore(
- induc_,
- InsertInstruction(
- new (&allocator_)
- HShl(Primitive::kPrimInt, InsertLocalLoad(basic_[0], 0), constant1_), 0), 0);
+ new (&allocator_) HNeg(Primitive::kPrimInt, k), 0);
+ k->AddInput(
+ InsertInstruction(new (&allocator_) HShl(Primitive::kPrimInt, basic_[0], constant1_), 0));
PerformInductionVarAnalysis();
EXPECT_STREQ("wrap((100), ((2) * i + (100)))", GetInductionInfo(add, 0).c_str());
@@ -461,11 +444,15 @@ TEST_F(InductionVarAnalysisTest, FindPeriodicInduction) {
// k = d;
// }
BuildLoopNest(1);
- HInstruction* store1 = InsertArrayStore(induc_, 0);
- HInstruction* store2 = InsertArrayStore(tmp_, 0);
- InsertLocalStore(dum_, InsertLocalLoad(tmp_, 0), 0);
- InsertLocalStore(tmp_, InsertLocalLoad(induc_, 0), 0);
- InsertLocalStore(induc_, InsertLocalLoad(dum_, 0), 0);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant0_);
+ HPhi* t = InsertLoopPhi(1, 0);
+ t->AddInput(constant100_);
+
+ HInstruction* store1 = InsertArrayStore(k, 0);
+ HInstruction* store2 = InsertArrayStore(t, 0);
+ k->AddInput(t);
+ t->AddInput(k);
PerformInductionVarAnalysis();
EXPECT_STREQ("periodic((0), (100))", GetInductionInfo(store1->InputAt(1), 0).c_str());
@@ -480,10 +467,13 @@ TEST_F(InductionVarAnalysisTest, FindIdiomaticPeriodicInduction) {
// k = 1 - k;
// }
BuildLoopNest(1);
- HInstruction* store = InsertArrayStore(induc_, 0);
+ HPhi* k = InsertLoopPhi(0, 0);
+ k->AddInput(constant0_);
+
+ HInstruction* store = InsertArrayStore(k, 0);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(Primitive::kPrimInt, constant1_, InsertLocalLoad(induc_, 0)), 0);
- InsertLocalStore(induc_, sub, 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, constant1_, k), 0);
+ k->AddInput(sub);
PerformInductionVarAnalysis();
EXPECT_STREQ("periodic((0), (1))", GetInductionInfo(store->InputAt(1), 0).c_str());
@@ -502,26 +492,24 @@ TEST_F(InductionVarAnalysisTest, FindDerivedPeriodicInduction) {
// t = - k;
// }
BuildLoopNest(1);
- InsertLocalStore(
- induc_,
- InsertInstruction(new (&allocator_)
- HSub(Primitive::kPrimInt, constant1_, InsertLocalLoad(induc_, 0)), 0), 0);
+ HPhi* k_header = InsertLoopPhi(0, 0);
+ k_header->AddInput(constant0_);
+
+ HInstruction* k_body = InsertInstruction(
+ new (&allocator_) HSub(Primitive::kPrimInt, constant1_, k_header), 0);
+ k_header->AddInput(k_body);
+
// Derived expressions.
HInstruction *add = InsertInstruction(
- new (&allocator_) HAdd(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
- InsertLocalStore(tmp_, add, 0);
+ new (&allocator_) HAdd(Primitive::kPrimInt, k_body, constant100_), 0);
HInstruction *sub = InsertInstruction(
- new (&allocator_) HSub(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
- InsertLocalStore(tmp_, sub, 0);
+ new (&allocator_) HSub(Primitive::kPrimInt, k_body, constant100_), 0);
HInstruction *mul = InsertInstruction(
- new (&allocator_) HMul(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant100_), 0);
- InsertLocalStore(tmp_, mul, 0);
+ new (&allocator_) HMul(Primitive::kPrimInt, k_body, constant100_), 0);
HInstruction *shl = InsertInstruction(
- new (&allocator_) HShl(Primitive::kPrimInt, InsertLocalLoad(induc_, 0), constant1_), 0);
- InsertLocalStore(tmp_, shl, 0);
+ new (&allocator_) HShl(Primitive::kPrimInt, k_body, constant1_), 0);
HInstruction *neg = InsertInstruction(
- new (&allocator_) HNeg(Primitive::kPrimInt, InsertLocalLoad(induc_, 0)), 0);
- InsertLocalStore(tmp_, neg, 0);
+ new (&allocator_) HNeg(Primitive::kPrimInt, k_body), 0);
PerformInductionVarAnalysis();
EXPECT_STREQ("periodic(((1) + (100)), (100))", GetInductionInfo(add, 0).c_str());
@@ -543,10 +531,20 @@ TEST_F(InductionVarAnalysisTest, FindDeepLoopInduction) {
// ..
// }
BuildLoopNest(10);
+
+ HPhi* k[10];
+ for (int d = 0; d < 10; d++) {
+ k[d] = InsertLoopPhi(0, d);
+ }
+
HInstruction *inc = InsertInstruction(
- new (&allocator_) HAdd(Primitive::kPrimInt, constant1_, InsertLocalLoad(induc_, 9)), 9);
- InsertLocalStore(induc_, inc, 9);
- HInstruction* store = InsertArrayStore(induc_, 9);
+ new (&allocator_) HAdd(Primitive::kPrimInt, constant1_, k[9]), 9);
+ HInstruction* store = InsertArrayStore(inc, 9);
+
+ for (int d = 0; d < 10; d++) {
+ k[d]->AddInput((d != 0) ? k[d - 1] : constant0_);
+ k[d]->AddInput((d != 9) ? k[d + 1] : inc);
+ }
PerformInductionVarAnalysis();
// Avoid exact phi number, since that depends on the SSA building phase.
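
All of the rewritten tests above build their loop phis by hand with the same convention: the first AddInput() call supplies the value flowing in from the preheader (the initial value), and the second supplies the loop-carried value from the back edge, which is why the AddInput of a constant always precedes the AddInput of the recomputed value. A minimal sketch of the pattern, reusing only the fixture helpers visible above (InsertLoopPhi appears to take an index within the header and a loop depth, as used in these tests):

    // k = 0; loop at depth 0 { ...; k = k + 1; }
    HPhi* k = InsertLoopPhi(0, 0);          // phi placed in the loop header
    k->AddInput(constant0_);                // first input: value from the preheader
    HInstruction* next = InsertInstruction(
        new (&allocator_) HAdd(Primitive::kPrimInt, k, constant1_), 0);
    k->AddInput(next);                      // second input: value from the back edge

For the ten-loop nest above, the same rule applied level by level gives each k[d] the next-outer phi as its incoming value and the next-inner phi (or the increment, at the innermost level) as its loop-carried value.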
diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc
index eda9c01a01..55a654e301 100644
--- a/compiler/optimizing/induction_var_range_test.cc
+++ b/compiler/optimizing/induction_var_range_test.cc
@@ -86,25 +86,20 @@ class InductionVarRangeTest : public CommonCompilerTest {
loop_body->AddSuccessor(loop_header);
return_block->AddSuccessor(exit_block_);
// Instructions.
- HLocal* induc = new (&allocator_) HLocal(0);
- entry_block_->AddInstruction(induc);
- loop_preheader_->AddInstruction(
- new (&allocator_) HStoreLocal(induc, graph_->GetIntConstant(lower))); // i = l
loop_preheader_->AddInstruction(new (&allocator_) HGoto());
- HInstruction* load = new (&allocator_) HLoadLocal(induc, Primitive::kPrimInt);
- loop_header->AddInstruction(load);
+ HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
+ loop_header->AddPhi(phi);
+ phi->AddInput(graph_->GetIntConstant(lower)); // i = l
if (stride > 0) {
- condition_ = new (&allocator_) HLessThan(load, upper); // i < u
+ condition_ = new (&allocator_) HLessThan(phi, upper); // i < u
} else {
- condition_ = new (&allocator_) HGreaterThan(load, upper); // i > u
+ condition_ = new (&allocator_) HGreaterThan(phi, upper); // i > u
}
loop_header->AddInstruction(condition_);
loop_header->AddInstruction(new (&allocator_) HIf(condition_));
- load = new (&allocator_) HLoadLocal(induc, Primitive::kPrimInt);
- loop_body->AddInstruction(load);
- increment_ = new (&allocator_) HAdd(Primitive::kPrimInt, load, graph_->GetIntConstant(stride));
- loop_body->AddInstruction(increment_);
- loop_body->AddInstruction(new (&allocator_) HStoreLocal(induc, increment_)); // i += s
+ increment_ = new (&allocator_) HAdd(Primitive::kPrimInt, phi, graph_->GetIntConstant(stride));
+ loop_body->AddInstruction(increment_); // i += s
+ phi->AddInput(increment_);
loop_body->AddInstruction(new (&allocator_) HGoto());
return_block->AddInstruction(new (&allocator_) HReturnVoid());
exit_block_->AddInstruction(new (&allocator_) HExit());
@@ -112,7 +107,7 @@ class InductionVarRangeTest : public CommonCompilerTest {
/** Constructs SSA and performs induction variable analysis. */
void PerformInductionVarAnalysis() {
- TransformToSsa(graph_);
+ graph_->BuildDominatorTree();
iva_->Run();
}
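
The range test's hand-built counted loop now has the canonical SSA shape the analysis expects: the header phi is the induction variable i, its first input is the lower bound from the preheader, the header condition tests the phi directly, and the body feeds i + stride back as the phi's second input. With no locals left to eliminate, the setup only needs graph_->BuildDominatorTree() before running the pass instead of the old TransformToSsa() helper. A condensed view of the construction above, with the block plumbing omitted and comments added (names as in the hunk):

    HPhi* phi = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
    loop_header->AddPhi(phi);
    phi->AddInput(graph_->GetIntConstant(lower));                  // i = lower (preheader input)
    condition_ = new (&allocator_) HLessThan(phi, upper);          // i < upper (positive stride)
    loop_header->AddInstruction(condition_);
    loop_header->AddInstruction(new (&allocator_) HIf(condition_));
    increment_ = new (&allocator_) HAdd(
        Primitive::kPrimInt, phi, graph_->GetIntConstant(stride)); // i + stride
    loop_body->AddInstruction(increment_);
    phi->AddInput(increment_);                                     // back-edge input closes the cycle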
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index a8841d31c5..68e96fba74 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -378,7 +378,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
// Run type propagation to get the guard typed, and eventually propagate the
// type of the receiver.
- ReferenceTypePropagation rtp_fixup(graph_, handles_);
+ ReferenceTypePropagation rtp_fixup(graph_, handles_, /* is_first_run */ false);
rtp_fixup.Run();
MaybeRecordStat(kInlinedMonomorphicCall);
@@ -420,6 +420,9 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
actual_method = new_method;
} else if (actual_method != new_method) {
// Different methods, bailout.
+ VLOG(compiler) << "Call to " << PrettyMethod(resolved_method)
+ << " from inline cache is not inlined because it resolves"
+ << " to different methods";
return false;
}
}
@@ -474,7 +477,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
// Run type propagation to get the guard typed.
- ReferenceTypePropagation rtp_fixup(graph_, handles_);
+ ReferenceTypePropagation rtp_fixup(graph_, handles_, /* is_first_run */ false);
rtp_fixup.Run();
MaybeRecordStat(kInlinedPolymorphicCall);
@@ -612,8 +615,9 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
// TODO: Needs null check.
return false;
}
+ Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, data.object_arg);
- HInstanceFieldGet* iget = CreateInstanceFieldGet(resolved_method, data.field_idx, obj);
+ HInstanceFieldGet* iget = CreateInstanceFieldGet(dex_cache, data.field_idx, obj);
DCHECK_EQ(iget->GetFieldOffset().Uint32Value(), data.field_offset);
DCHECK_EQ(iget->IsVolatile() ? 1u : 0u, data.is_volatile);
invoke_instruction->GetBlock()->InsertInstructionBefore(iget, invoke_instruction);
@@ -626,9 +630,10 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
// TODO: Needs null check.
return false;
}
+ Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, data.object_arg);
HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, data.src_arg);
- HInstanceFieldSet* iput = CreateInstanceFieldSet(resolved_method, data.field_idx, obj, value);
+ HInstanceFieldSet* iput = CreateInstanceFieldSet(dex_cache, data.field_idx, obj, value);
DCHECK_EQ(iput->GetFieldOffset().Uint32Value(), data.field_offset);
DCHECK_EQ(iput->IsVolatile() ? 1u : 0u, data.is_volatile);
invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
@@ -638,6 +643,59 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
}
break;
}
+ case kInlineOpConstructor: {
+ const InlineConstructorData& data = inline_method.d.constructor_data;
+ // Get the indexes to arrays for easier processing.
+ uint16_t iput_field_indexes[] = {
+ data.iput0_field_index, data.iput1_field_index, data.iput2_field_index
+ };
+ uint16_t iput_args[] = { data.iput0_arg, data.iput1_arg, data.iput2_arg };
+ static_assert(arraysize(iput_args) == arraysize(iput_field_indexes), "Size mismatch");
+ // Count valid field indexes.
+ size_t number_of_iputs = 0u;
+ while (number_of_iputs != arraysize(iput_field_indexes) &&
+ iput_field_indexes[number_of_iputs] != DexFile::kDexNoIndex16) {
+ // Check that there are no duplicate valid field indexes.
+ DCHECK_EQ(0, std::count(iput_field_indexes + number_of_iputs + 1,
+ iput_field_indexes + arraysize(iput_field_indexes),
+ iput_field_indexes[number_of_iputs]));
+ ++number_of_iputs;
+ }
+ // Check that there are no valid field indexes in the rest of the array.
+ DCHECK_EQ(0, std::count_if(iput_field_indexes + number_of_iputs,
+ iput_field_indexes + arraysize(iput_field_indexes),
+ [](uint16_t index) { return index != DexFile::kDexNoIndex16; }));
+
+ // Create HInstanceFieldSet for each IPUT that stores non-zero data.
+ Handle<mirror::DexCache> dex_cache;
+ HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u);
+ bool needs_constructor_barrier = false;
+ for (size_t i = 0; i != number_of_iputs; ++i) {
+ HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
+ if (!value->IsConstant() ||
+ (!value->AsConstant()->IsZero() && !value->IsNullConstant())) {
+ if (dex_cache.GetReference() == nullptr) {
+ dex_cache = handles_->NewHandle(resolved_method->GetDexCache());
+ }
+ uint16_t field_index = iput_field_indexes[i];
+ HInstanceFieldSet* iput = CreateInstanceFieldSet(dex_cache, field_index, obj, value);
+ invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
+
+ // Check whether the field is final. If it is, we need to add a barrier.
+ size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
+ ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
+ DCHECK(resolved_field != nullptr);
+ if (resolved_field->IsFinal()) {
+ needs_constructor_barrier = true;
+ }
+ }
+ }
+ if (needs_constructor_barrier) {
+ HMemoryBarrier* barrier = new (graph_->GetArena()) HMemoryBarrier(kStoreStore, kNoDexPc);
+ invoke_instruction->GetBlock()->InsertInstructionBefore(barrier, invoke_instruction);
+ }
+ break;
+ }
default:
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
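
The new kInlineOpConstructor case covers constructors that the dex-level pattern matcher has already reduced to InlineConstructorData, that is, constructors which only store up to three of their arguments into fields of this, for example Foo(int a, Object b) { this.a = a; this.b = b; }. The substitution replaces the invoke with direct HInstanceFieldSet instructions, skips stores of constant zero or null (an uninitialized receiver's fields presumably still hold their default values), and appends a kStoreStore barrier when any written field is final, mirroring the constructor barrier the callee would have provided. A stripped-down sketch of the emitted replacement, using only names that appear in the hunk (the zero-store filter and the final-field lookup are elided here):

    HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u);
    for (size_t i = 0; i != number_of_iputs; ++i) {
      HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]);
      HInstanceFieldSet* iput =
          CreateInstanceFieldSet(dex_cache, iput_field_indexes[i], obj, value);
      invoke_instruction->GetBlock()->InsertInstructionBefore(iput, invoke_instruction);
    }
    if (needs_constructor_barrier) {  // set when some stored field was final
      HMemoryBarrier* barrier = new (graph_->GetArena()) HMemoryBarrier(kStoreStore, kNoDexPc);
      invoke_instruction->GetBlock()->InsertInstructionBefore(barrier, invoke_instruction);
    }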
@@ -652,11 +710,10 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction,
return true;
}
-HInstanceFieldGet* HInliner::CreateInstanceFieldGet(ArtMethod* resolved_method,
+HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
uint32_t field_index,
HInstruction* obj)
SHARED_REQUIRES(Locks::mutator_lock_) {
- Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
@@ -667,24 +724,23 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(ArtMethod* resolved_method,
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
- *resolved_method->GetDexFile(),
+ *dex_cache->GetDexFile(),
dex_cache,
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
if (iget->GetType() == Primitive::kPrimNot) {
- ReferenceTypePropagation rtp(graph_, handles_);
+ ReferenceTypePropagation rtp(graph_, handles_, /* is_first_run */ false);
rtp.Visit(iget);
}
return iget;
}
-HInstanceFieldSet* HInliner::CreateInstanceFieldSet(ArtMethod* resolved_method,
+HInstanceFieldSet* HInliner::CreateInstanceFieldSet(Handle<mirror::DexCache> dex_cache,
uint32_t field_index,
HInstruction* obj,
HInstruction* value)
SHARED_REQUIRES(Locks::mutator_lock_) {
- Handle<mirror::DexCache> dex_cache(handles_->NewHandle(resolved_method->GetDexCache()));
size_t pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
@@ -696,13 +752,14 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(ArtMethod* resolved_method,
resolved_field->IsVolatile(),
field_index,
resolved_field->GetDeclaringClass()->GetDexClassDefIndex(),
- *resolved_method->GetDexFile(),
+ *dex_cache->GetDexFile(),
dex_cache,
// Read barrier generates a runtime call in slow path and we need a valid
// dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537.
/* dex_pc */ 0);
return iput;
}
+
bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
HInvoke* invoke_instruction,
bool same_dex_file,
@@ -772,7 +829,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
resolved_method->GetQuickenedInfo(),
dex_cache);
- if (!builder.BuildGraph(*code_item)) {
+ if (builder.BuildGraph(*code_item, handles_) != kAnalysisSuccess) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be built, so cannot be inlined";
return false;
@@ -785,12 +842,6 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
return false;
}
- if (callee_graph->TryBuildingSsa(handles_) != kAnalysisSuccess) {
- VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
- << " could not be transformed to SSA";
- return false;
- }
-
size_t parameter_index = 0;
for (HInstructionIterator instructions(callee_graph->GetEntryBlock()->GetInstructions());
!instructions.Done();
@@ -935,12 +986,18 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
if (current->IsNewInstance() &&
(current->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectWithAccessCheck)) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
+ << " could not be inlined because it is using an entrypoint"
+ << " with access checks";
// Allocation entrypoint does not handle inlined frames.
return false;
}
if (current->IsNewArray() &&
(current->AsNewArray()->GetEntrypoint() == kQuickAllocArrayWithAccessCheck)) {
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
+ << " could not be inlined because it is using an entrypoint"
+ << " with access checks";
// Allocation entrypoint does not handle inlined frames.
return false;
}
@@ -950,6 +1007,9 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method,
current->IsUnresolvedStaticFieldSet() ||
current->IsUnresolvedInstanceFieldSet()) {
// Entrypoint for unresolved fields does not handle inlined frames.
+ VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
+ << " could not be inlined because it is using an unresolved"
+ << " entrypoint";
return false;
}
}
@@ -991,13 +1051,13 @@ void HInliner::FixUpReturnReferenceType(ArtMethod* resolved_method,
if (invoke_rti.IsStrictSupertypeOf(return_rti)
|| (return_rti.IsExact() && !invoke_rti.IsExact())
|| !return_replacement->CanBeNull()) {
- ReferenceTypePropagation(graph_, handles_).Run();
+ ReferenceTypePropagation(graph_, handles_, /* is_first_run */ false).Run();
}
}
} else if (return_replacement->IsInstanceOf()) {
if (do_rtp) {
// Inlining InstanceOf into an If may put a tighter bound on reference types.
- ReferenceTypePropagation(graph_, handles_).Run();
+ ReferenceTypePropagation(graph_, handles_, /* is_first_run */ false).Run();
}
}
}
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 0127d55192..7d343c62eb 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -70,11 +70,11 @@ class HInliner : public HOptimization {
SHARED_REQUIRES(Locks::mutator_lock_);
// Create a new HInstanceFieldGet.
- HInstanceFieldGet* CreateInstanceFieldGet(ArtMethod* resolved_method,
+ HInstanceFieldGet* CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
uint32_t field_index,
HInstruction* obj);
// Create a new HInstanceFieldSet.
- HInstanceFieldSet* CreateInstanceFieldSet(ArtMethod* resolved_method,
+ HInstanceFieldSet* CreateInstanceFieldSet(Handle<mirror::DexCache> dex_cache,
uint32_t field_index,
HInstruction* obj,
HInstruction* value);
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 0029cc3650..a48d06f3d0 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -757,11 +757,97 @@ void InstructionSimplifierVisitor::VisitArraySet(HArraySet* instruction) {
}
}
+static bool IsTypeConversionImplicit(Primitive::Type input_type, Primitive::Type result_type) {
+ // Besides conversion to the same type, widening integral conversions are implicit,
+ // excluding conversions to long and the byte->char conversion where we need to
+ // clear the high 16 bits of the 32-bit sign-extended representation of byte.
+ return result_type == input_type ||
+ (result_type == Primitive::kPrimInt && input_type == Primitive::kPrimByte) ||
+ (result_type == Primitive::kPrimInt && input_type == Primitive::kPrimShort) ||
+ (result_type == Primitive::kPrimInt && input_type == Primitive::kPrimChar) ||
+ (result_type == Primitive::kPrimShort && input_type == Primitive::kPrimByte);
+}
+
+static bool IsTypeConversionLossless(Primitive::Type input_type, Primitive::Type result_type) {
+ // The conversion to a larger type is loss-less with the exception of two cases,
+ // - conversion to char, the only unsigned type, where we may lose some bits, and
+ // - conversion from float to long, the only FP to integral conversion with smaller FP type.
+ // For integral to FP conversions this holds because the FP mantissa is large enough.
+ DCHECK_NE(input_type, result_type);
+ return Primitive::ComponentSize(result_type) > Primitive::ComponentSize(input_type) &&
+ result_type != Primitive::kPrimChar &&
+ !(result_type == Primitive::kPrimLong && input_type == Primitive::kPrimFloat);
+}
+
void InstructionSimplifierVisitor::VisitTypeConversion(HTypeConversion* instruction) {
- if (instruction->GetResultType() == instruction->GetInputType()) {
- // Remove the instruction if it's converting to the same type.
- instruction->ReplaceWith(instruction->GetInput());
+ HInstruction* input = instruction->GetInput();
+ Primitive::Type input_type = input->GetType();
+ Primitive::Type result_type = instruction->GetResultType();
+ if (IsTypeConversionImplicit(input_type, result_type)) {
+ // Remove the implicit conversion; this includes conversion to the same type.
+ instruction->ReplaceWith(input);
instruction->GetBlock()->RemoveInstruction(instruction);
+ RecordSimplification();
+ return;
+ }
+
+ if (input->IsTypeConversion()) {
+ HTypeConversion* input_conversion = input->AsTypeConversion();
+ HInstruction* original_input = input_conversion->GetInput();
+ Primitive::Type original_type = original_input->GetType();
+
+ // When the first conversion is lossless, a direct conversion from the original type
+ // to the final type yields the same result, even for a lossy second conversion, for
+ // example float->double->int or int->double->float.
+ bool is_first_conversion_lossless = IsTypeConversionLossless(original_type, input_type);
+
+ // For integral conversions, see if the first conversion loses only bits that the second
+ // doesn't need, i.e. the final type is no wider than the intermediate. If so, direct
+ // conversion yields the same result, for example long->int->short or int->char->short.
+ bool integral_conversions_with_non_widening_second =
+ Primitive::IsIntegralType(input_type) &&
+ Primitive::IsIntegralType(original_type) &&
+ Primitive::IsIntegralType(result_type) &&
+ Primitive::ComponentSize(result_type) <= Primitive::ComponentSize(input_type);
+
+ if (is_first_conversion_lossless || integral_conversions_with_non_widening_second) {
+ // If the merged conversion is implicit, do the simplification unconditionally.
+ if (IsTypeConversionImplicit(original_type, result_type)) {
+ instruction->ReplaceWith(original_input);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ if (!input_conversion->HasUses()) {
+ // Don't wait for DCE.
+ input_conversion->GetBlock()->RemoveInstruction(input_conversion);
+ }
+ RecordSimplification();
+ return;
+ }
+ // Otherwise simplify only if the first conversion has no other use.
+ if (input_conversion->HasOnlyOneNonEnvironmentUse()) {
+ input_conversion->ReplaceWith(original_input);
+ input_conversion->GetBlock()->RemoveInstruction(input_conversion);
+ RecordSimplification();
+ return;
+ }
+ }
+ } else if (input->IsAnd() &&
+ Primitive::IsIntegralType(result_type) &&
+ input->HasOnlyOneNonEnvironmentUse()) {
+ DCHECK(Primitive::IsIntegralType(input_type));
+ HAnd* input_and = input->AsAnd();
+ HConstant* constant = input_and->GetConstantRight();
+ if (constant != nullptr) {
+ int64_t value = Int64FromConstant(constant);
+ DCHECK_NE(value, -1); // "& -1" would have been optimized away in VisitAnd().
+ size_t trailing_ones = CTZ(~static_cast<uint64_t>(value));
+ if (trailing_ones >= kBitsPerByte * Primitive::ComponentSize(result_type)) {
+ // The `HAnd` is useless, for example in `(byte) (x & 0xff)`, get rid of it.
+ input_and->ReplaceWith(input_and->GetLeastConstantLeft());
+ input_and->GetBlock()->RemoveInstruction(input_and);
+ RecordSimplification();
+ return;
+ }
+ }
}
}
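
The conversion-merging rules above amount to three concrete cases, taken from the comments in the hunk: a lossless first step makes the chain equivalent to a direct conversion (float to double to int folds to float to int), an integral chain whose final type is no wider than the intermediate one folds likewise (long to int to short, or int to char to short), and a mask that keeps at least as many low bits as the final narrowing makes the HAnd redundant, so (byte)(x & 0xff) becomes (byte) x. The two integral identities can be sanity-checked with ordinary C++ conversions; the stand-alone sketch below is for illustration only and relies on the usual two's-complement, modulo narrowing behaviour (guaranteed since C++20):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t samples[] = {0, -1, 0x7FFF, 0x18000, 0x123456789ABCDEF0LL, -42};
      for (int64_t x : samples) {
        // long->int->short == long->short: the bits dropped by the intermediate step
        // are bits the final narrowing would discard anyway.
        assert(static_cast<int16_t>(static_cast<int32_t>(x)) == static_cast<int16_t>(x));
        // (byte)(x & 0xff) == (byte) x: the mask keeps no fewer low bits than the
        // conversion to byte, so the HAnd in the pattern above is redundant.
        assert(static_cast<int8_t>(x & 0xff) == static_cast<int8_t>(x));
      }
      return 0;
    }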
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index a6be324730..db39bc8eec 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -481,6 +481,7 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
case kInlineOpNonWideConst:
case kInlineOpIGet:
case kInlineOpIPut:
+ case kInlineOpConstructor:
return Intrinsics::kNone;
// String init cases, not intrinsics.
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 96a3c3c2f1..00a158b10a 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1580,6 +1580,251 @@ void IntrinsicCodeGeneratorARM::VisitSystemArrayCopy(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+static void CreateFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ // If the graph is debuggable, all callee-saved floating-point registers are blocked by
+ // the code generator. Furthermore, the register allocator creates fixed live intervals
+ // for all caller-saved registers because we are doing a function call. As a result, if
+ // the input and output locations are unallocated, the register allocator runs out of
+ // registers and fails; however, a debuggable graph is not the common case.
+ if (invoke->GetBlock()->GetGraph()->IsDebuggable()) {
+ return;
+ }
+
+ DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
+ DCHECK_EQ(invoke->InputAt(0)->GetType(), Primitive::kPrimDouble);
+ DCHECK_EQ(invoke->GetType(), Primitive::kPrimDouble);
+
+ LocationSummary* const locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ const InvokeRuntimeCallingConvention calling_convention;
+
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ // Native code uses the soft float ABI.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+}
+
+static void CreateFPFPToFPCallLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ // If the graph is debuggable, all callee-saved floating-point registers are blocked by
+ // the code generator. Furthermore, the register allocator creates fixed live intervals
+ // for all caller-saved registers because we are doing a function call. As a result, if
+ // the input and output locations are unallocated, the register allocator runs out of
+ // registers and fails; however, a debuggable graph is not the common case.
+ if (invoke->GetBlock()->GetGraph()->IsDebuggable()) {
+ return;
+ }
+
+ DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
+ DCHECK_EQ(invoke->InputAt(0)->GetType(), Primitive::kPrimDouble);
+ DCHECK_EQ(invoke->InputAt(1)->GetType(), Primitive::kPrimDouble);
+ DCHECK_EQ(invoke->GetType(), Primitive::kPrimDouble);
+
+ LocationSummary* const locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ const InvokeRuntimeCallingConvention calling_convention;
+
+ locations->SetInAt(0, Location::RequiresFpuRegister());
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ locations->SetOut(Location::RequiresFpuRegister());
+ // Native code uses the soft float ABI.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+}
+
+static void GenFPToFPCall(HInvoke* invoke,
+ ArmAssembler* assembler,
+ CodeGeneratorARM* codegen,
+ QuickEntrypointEnum entry) {
+ LocationSummary* const locations = invoke->GetLocations();
+ const InvokeRuntimeCallingConvention calling_convention;
+
+ DCHECK_EQ(invoke->GetNumberOfArguments(), 1U);
+ DCHECK(locations->WillCall() && locations->Intrinsified());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(0)));
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(1)));
+
+ __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+ // Native code uses the soft float ABI.
+ __ vmovrrd(calling_convention.GetRegisterAt(0),
+ calling_convention.GetRegisterAt(1),
+ FromLowSToD(locations->InAt(0).AsFpuRegisterPairLow<SRegister>()));
+ __ blx(LR);
+ codegen->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ vmovdrr(FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>()),
+ calling_convention.GetRegisterAt(0),
+ calling_convention.GetRegisterAt(1));
+}
+
+static void GenFPFPToFPCall(HInvoke* invoke,
+ ArmAssembler* assembler,
+ CodeGeneratorARM* codegen,
+ QuickEntrypointEnum entry) {
+ LocationSummary* const locations = invoke->GetLocations();
+ const InvokeRuntimeCallingConvention calling_convention;
+
+ DCHECK_EQ(invoke->GetNumberOfArguments(), 2U);
+ DCHECK(locations->WillCall() && locations->Intrinsified());
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(0)));
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(1)));
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(2)));
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(calling_convention.GetRegisterAt(3)));
+
+ __ LoadFromOffset(kLoadWord, LR, TR, GetThreadOffset<kArmWordSize>(entry).Int32Value());
+ // Native code uses the soft float ABI.
+ __ vmovrrd(calling_convention.GetRegisterAt(0),
+ calling_convention.GetRegisterAt(1),
+ FromLowSToD(locations->InAt(0).AsFpuRegisterPairLow<SRegister>()));
+ __ vmovrrd(calling_convention.GetRegisterAt(2),
+ calling_convention.GetRegisterAt(3),
+ FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>()));
+ __ blx(LR);
+ codegen->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ vmovdrr(FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>()),
+ calling_convention.GetRegisterAt(0),
+ calling_convention.GetRegisterAt(1));
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathCos(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathCos(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCos);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathSin(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathSin(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickSin);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathAcos(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathAcos(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAcos);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathAsin(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathAsin(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAsin);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathAtan(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathAtan(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathCbrt(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathCbrt(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCbrt);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathCosh(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathCosh(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickCosh);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathExp(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathExp(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickExp);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathExpm1(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathExpm1(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickExpm1);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathLog(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathLog(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickLog);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathLog10(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathLog10(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickLog10);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathSinh(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathSinh(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickSinh);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathTan(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathTan(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickTan);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathTanh(HInvoke* invoke) {
+ CreateFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathTanh(HInvoke* invoke) {
+ GenFPToFPCall(invoke, GetAssembler(), codegen_, kQuickTanh);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathAtan2(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathAtan2(HInvoke* invoke) {
+ GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan2);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathHypot(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathHypot(HInvoke* invoke) {
+ GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickHypot);
+}
+
+void IntrinsicLocationsBuilderARM::VisitMathNextAfter(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM::VisitMathNextAfter(HInvoke* invoke) {
+ GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickNextAfter);
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -1610,23 +1855,6 @@ UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
-UNIMPLEMENTED_INTRINSIC(MathCos)
-UNIMPLEMENTED_INTRINSIC(MathSin)
-UNIMPLEMENTED_INTRINSIC(MathAcos)
-UNIMPLEMENTED_INTRINSIC(MathAsin)
-UNIMPLEMENTED_INTRINSIC(MathAtan)
-UNIMPLEMENTED_INTRINSIC(MathAtan2)
-UNIMPLEMENTED_INTRINSIC(MathCbrt)
-UNIMPLEMENTED_INTRINSIC(MathCosh)
-UNIMPLEMENTED_INTRINSIC(MathExp)
-UNIMPLEMENTED_INTRINSIC(MathExpm1)
-UNIMPLEMENTED_INTRINSIC(MathHypot)
-UNIMPLEMENTED_INTRINSIC(MathLog)
-UNIMPLEMENTED_INTRINSIC(MathLog10)
-UNIMPLEMENTED_INTRINSIC(MathNextAfter)
-UNIMPLEMENTED_INTRINSIC(MathSinh)
-UNIMPLEMENTED_INTRINSIC(MathTan)
-UNIMPLEMENTED_INTRINSIC(MathTanh)
UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
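
The ARM versions of these Math intrinsics call the quick runtime entrypoints and, as the comments note, native code uses the soft-float ABI: double arguments travel in core register pairs and the double result comes back in r0:r1. That is why the locations reserve r0-r3 as temporaries and the generators shuffle values between the VFP registers chosen by the register allocator and the core registers around the call. An illustrative (not literal) expansion of GenFPFPToFPCall for Math.atan2, with concrete registers substituted for the calling-convention and location lookups:

    // Illustrative only: the actual D registers depend on register allocation.
    __ LoadFromOffset(kLoadWord, LR, TR,
                      GetThreadOffset<kArmWordSize>(kQuickAtan2).Int32Value());
    __ vmovrrd(R0, R1, D0);   // first double argument -> r0:r1 (soft-float ABI)
    __ vmovrrd(R2, R3, D1);   // second double argument -> r2:r3
    __ blx(LR);               // call the runtime's atan2 entrypoint through LR
    codegen->RecordPcInfo(invoke, invoke->GetDexPc());
    __ vmovdrr(D0, R0, R1);   // result returned in r0:r1 -> move into the output D register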
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index 2b63ec8971..9fb32f4001 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -76,7 +76,7 @@ class LICMTest : public CommonCompilerTest {
// Performs LICM optimizations (after proper set up).
void PerformLICM() {
- TransformToSsa(graph_);
+ graph_->BuildDominatorTree();
SideEffectsAnalysis side_effects(graph_);
side_effects.Run();
LICM(graph_, side_effects).Run();
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index ed275b1544..13e14c53b5 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -39,14 +39,7 @@ template <size_t number_of_blocks>
static void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
-
- TransformToSsa(graph);
-
+ HGraph* graph = CreateCFG(&allocator, data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 991f8f70ea..3202493c3a 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -32,14 +32,10 @@ namespace art {
class LiveRangesTest : public CommonCompilerTest {};
static HGraph* BuildGraph(const uint16_t* data, ArenaAllocator* allocator) {
- HGraph* graph = CreateGraph(allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- builder.BuildGraph(*item);
+ HGraph* graph = CreateCFG(allocator, data);
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
- TransformToSsa(graph);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
return graph;
@@ -303,13 +299,12 @@ TEST_F(LiveRangesTest, Loop2) {
* 12: equal
* 14: if +++++
* | \ +
- * | 18: suspend
- * | 20: add
- * | 22: goto
+ * | 18: add
+ * | 20: goto
* |
- * 26: return
+ * 24: return
* |
- * 30: exit
+ * 28: exit
*
* We want to make sure the phi at 10 has a lifetime hole after the add at 20.
*/
@@ -345,18 +340,18 @@ TEST_F(LiveRangesTest, Loop2) {
interval = phi->GetLiveInterval();
range = interval->GetFirstRange();
ASSERT_EQ(10u, range->GetStart());
- ASSERT_EQ(21u, range->GetEnd());
+ ASSERT_EQ(19u, range->GetEnd());
range = range->GetNext();
ASSERT_TRUE(range != nullptr);
- ASSERT_EQ(24u, range->GetStart());
- ASSERT_EQ(26u, range->GetEnd());
+ ASSERT_EQ(22u, range->GetStart());
+ ASSERT_EQ(24u, range->GetEnd());
// Test for the add instruction.
HAdd* add = liveness.GetInstructionFromSsaIndex(2)->AsAdd();
interval = add->GetLiveInterval();
range = interval->GetFirstRange();
- ASSERT_EQ(20u, range->GetStart());
- ASSERT_EQ(24u, range->GetEnd());
+ ASSERT_EQ(18u, range->GetStart());
+ ASSERT_EQ(22u, range->GetEnd());
ASSERT_TRUE(range->GetNext() == nullptr);
}
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 7736eedae1..92a987cb1d 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -46,12 +46,7 @@ static void DumpBitVector(BitVector* vector,
static void TestCode(const uint16_t* data, const char* expected) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
- TransformToSsa(graph);
+ HGraph* graph = CreateCFG(&allocator, data);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 9a97f54d54..8eaac0bbd3 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -61,7 +61,7 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
(use->IsStaticFieldSet() && (reference_ == use->InputAt(1))) ||
(use->IsUnresolvedStaticFieldSet() && (reference_ == use->InputAt(0))) ||
(use->IsArraySet() && (reference_ == use->InputAt(2)))) {
- // reference_ is merged to a phi/HSelect, passed to a callee, or stored to heap.
+ // reference_ is merged to HPhi/HSelect, passed to a callee, or stored to heap.
// reference_ isn't the only name that can refer to its value anymore.
is_singleton_ = false;
is_singleton_and_not_returned_ = false;
@@ -458,6 +458,10 @@ class HeapLocationCollector : public HGraphVisitor {
CreateReferenceInfoForReferenceType(instruction);
}
+ void VisitSelect(HSelect* instruction) OVERRIDE {
+ CreateReferenceInfoForReferenceType(instruction);
+ }
+
void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) OVERRIDE {
may_deoptimize_ = true;
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index f269885907..ca66f631a6 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -27,6 +27,15 @@
namespace art {
+void HGraph::InitializeInexactObjectRTI(StackHandleScopeCollection* handles) {
+ ScopedObjectAccess soa(Thread::Current());
+ // Create the inexact Object reference type and store it in the HGraph.
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ inexact_object_rti_ = ReferenceTypeInfo::Create(
+ handles->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangObject)),
+ /* is_exact */ false);
+}
+
void HGraph::AddBlock(HBasicBlock* block) {
block->SetBlockId(blocks_.size());
blocks_.push_back(block);
@@ -236,29 +245,6 @@ void HGraph::ComputeDominanceInformation() {
}
}
-GraphAnalysisResult HGraph::TryBuildingSsa(StackHandleScopeCollection* handles) {
- GraphAnalysisResult result = BuildDominatorTree();
- if (result != kAnalysisSuccess) {
- return result;
- }
-
- // Create the inexact Object reference type and store it in the HGraph.
- ScopedObjectAccess soa(Thread::Current());
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- inexact_object_rti_ = ReferenceTypeInfo::Create(
- handles->NewHandle(linker->GetClassRoot(ClassLinker::kJavaLangObject)),
- /* is_exact */ false);
-
- // Tranforms graph to SSA form.
- result = SsaBuilder(this, handles).BuildSsa();
- if (result != kAnalysisSuccess) {
- return result;
- }
-
- in_ssa_form_ = true;
- return kAnalysisSuccess;
-}
-
HBasicBlock* HGraph::SplitEdge(HBasicBlock* block, HBasicBlock* successor) {
HBasicBlock* new_block = new (arena_) HBasicBlock(this, successor->GetDexPc());
AddBlock(new_block);
@@ -1592,7 +1578,7 @@ void HBasicBlock::DisconnectAndDelete() {
loop_info->Remove(this);
if (loop_info->IsBackEdge(*this)) {
// If this was the last back edge of the loop, we deliberately leave the
- // loop in an inconsistent state and will fail SSAChecker unless the
+ // loop in an inconsistent state and will fail GraphChecker unless the
// entire loop is removed during the pass.
loop_info->RemoveBackEdge(this);
}
@@ -1631,7 +1617,7 @@ void HBasicBlock::DisconnectAndDelete() {
} else if (num_pred_successors == 0u) {
// The predecessor has no remaining successors and therefore must be dead.
// We deliberately leave it without a control-flow instruction so that the
- // SSAChecker fails unless it is not removed during the pass too.
+ // GraphChecker fails unless it is not removed during the pass too.
predecessor->RemoveInstruction(last_instruction);
} else {
// There are multiple successors left. The removed block might be a successor
@@ -2387,4 +2373,26 @@ std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs) {
return os;
}
+std::ostream& operator<<(std::ostream& os, TypeCheckKind rhs) {
+ switch (rhs) {
+ case TypeCheckKind::kUnresolvedCheck:
+ return os << "unresolved_check";
+ case TypeCheckKind::kExactCheck:
+ return os << "exact_check";
+ case TypeCheckKind::kClassHierarchyCheck:
+ return os << "class_hierarchy_check";
+ case TypeCheckKind::kAbstractClassCheck:
+ return os << "abstract_class_check";
+ case TypeCheckKind::kInterfaceCheck:
+ return os << "interface_check";
+ case TypeCheckKind::kArrayObjectCheck:
+ return os << "array_object_check";
+ case TypeCheckKind::kArrayCheck:
+ return os << "array_check";
+ default:
+ LOG(FATAL) << "Unknown TypeCheckKind: " << static_cast<int>(rhs);
+ UNREACHABLE();
+ }
+}
+
} // namespace art
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index daec096f3e..18b256f48e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -98,6 +98,7 @@ enum IfCondition {
};
enum GraphAnalysisResult {
+ kAnalysisInvalidBytecode,
kAnalysisFailThrowCatchLoop,
kAnalysisFailAmbiguousArrayOp,
kAnalysisSuccess,
@@ -308,10 +309,14 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
blocks_.reserve(kDefaultNumberOfBlocks);
}
+ // Acquires and stores RTI of inexact Object to be used when creating HNullConstant.
+ void InitializeInexactObjectRTI(StackHandleScopeCollection* handles);
+
ArenaAllocator* GetArena() const { return arena_; }
const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
bool IsInSsaForm() const { return in_ssa_form_; }
+ void SetInSsaForm() { in_ssa_form_ = true; }
HBasicBlock* GetEntryBlock() const { return entry_block_; }
HBasicBlock* GetExitBlock() const { return exit_block_; }
@@ -322,11 +327,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
void AddBlock(HBasicBlock* block);
- // Try building the SSA form of this graph, with dominance computation and
- // loop recognition. Returns a code specifying that it was successful or the
- // reason for failure.
- GraphAnalysisResult TryBuildingSsa(StackHandleScopeCollection* handles);
-
void ComputeDominanceInformation();
void ClearDominanceInformation();
void ClearLoopInformation();
@@ -1235,7 +1235,6 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(StoreLocal, Instruction) \
M(Sub, BinaryOperation) \
M(SuspendCheck, Instruction) \
- M(Temporary, Instruction) \
M(Throw, Instruction) \
M(TryBoundary, Instruction) \
M(TypeConversion, Instruction) \
@@ -4941,33 +4940,6 @@ class HBoundsCheck : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
};
-/**
- * Some DEX instructions are folded into multiple HInstructions that need
- * to stay live until the last HInstruction. This class
- * is used as a marker for the baseline compiler to ensure its preceding
- * HInstruction stays live. `index` represents the stack location index of the
- * instruction (the actual offset is computed as index * vreg_size).
- */
-class HTemporary : public HTemplateInstruction<0> {
- public:
- explicit HTemporary(size_t index, uint32_t dex_pc = kNoDexPc)
- : HTemplateInstruction(SideEffects::None(), dex_pc), index_(index) {}
-
- size_t GetIndex() const { return index_; }
-
- Primitive::Type GetType() const OVERRIDE {
- // The previous instruction is the one that will be stored in the temporary location.
- DCHECK(GetPrevious() != nullptr);
- return GetPrevious()->GetType();
- }
-
- DECLARE_INSTRUCTION(Temporary);
-
- private:
- const size_t index_;
- DISALLOW_COPY_AND_ASSIGN(HTemporary);
-};
-
class HSuspendCheck : public HTemplateInstruction<0> {
public:
explicit HSuspendCheck(uint32_t dex_pc)
@@ -5451,6 +5423,8 @@ enum class TypeCheckKind {
kArrayCheck // No optimization yet when checking against a generic array.
};
+std::ostream& operator<<(std::ostream& os, TypeCheckKind rhs);
+
class HInstanceOf : public HExpression<2> {
public:
HInstanceOf(HInstruction* object,
@@ -6023,9 +5997,14 @@ class HBlocksInLoopReversePostOrderIterator : public ValueObject {
};
inline int64_t Int64FromConstant(HConstant* constant) {
- DCHECK(constant->IsIntConstant() || constant->IsLongConstant());
- return constant->IsIntConstant() ? constant->AsIntConstant()->GetValue()
- : constant->AsLongConstant()->GetValue();
+ if (constant->IsIntConstant()) {
+ return constant->AsIntConstant()->GetValue();
+ } else if (constant->IsLongConstant()) {
+ return constant->AsLongConstant()->GetValue();
+ } else {
+ DCHECK(constant->IsNullConstant());
+ return 0;
+ }
}
inline bool IsSameDexFile(const DexFile& lhs, const DexFile& rhs) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index dcd8e7d216..12b748b7b6 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -186,18 +186,10 @@ class PassObserver : public ValueObject {
// Validate the HGraph if running in debug mode.
if (kIsDebugBuild) {
if (!graph_in_bad_state_) {
- if (graph_->IsInSsaForm()) {
- SSAChecker checker(graph_);
- checker.Run();
- if (!checker.IsValid()) {
- LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<SSAChecker>(checker);
- }
- } else {
- GraphChecker checker(graph_);
- checker.Run();
- if (!checker.IsValid()) {
- LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
- }
+ GraphChecker checker(graph_);
+ checker.Run();
+ if (!checker.IsValid()) {
+ LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
}
}
}
@@ -665,6 +657,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
&& compiler_driver->RequiresConstructorBarrier(Thread::Current(),
dex_compilation_unit.GetDexFile(),
dex_compilation_unit.GetClassDefIndex());
+
HGraph* graph = new (arena) HGraph(
arena,
dex_file,
@@ -675,6 +668,21 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
compiler_driver->GetCompilerOptions().GetDebuggable(),
osr);
+ const uint8_t* interpreter_metadata = nullptr;
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(class_loader)));
+ ArtMethod* art_method = compiler_driver->ResolveMethod(
+ soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
+ // We may not get a method, for example if its class is erroneous.
+ if (art_method != nullptr) {
+ graph->SetArtMethod(art_method);
+ interpreter_metadata = art_method->GetQuickenedInfo();
+ }
+ }
+
std::unique_ptr<CodeGenerator> codegen(
CodeGenerator::Create(graph,
instruction_set,
@@ -692,74 +700,55 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
visualizer_output_.get(),
compiler_driver);
- const uint8_t* interpreter_metadata = nullptr;
- {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> loader(hs.NewHandle(
- soa.Decode<mirror::ClassLoader*>(class_loader)));
- ArtMethod* art_method = compiler_driver->ResolveMethod(
- soa, dex_cache, loader, &dex_compilation_unit, method_idx, invoke_type);
- // We may not get a method, for example if its class is erroneous.
- if (art_method != nullptr) {
- graph->SetArtMethod(art_method);
- interpreter_metadata = art_method->GetQuickenedInfo();
- }
- }
- HGraphBuilder builder(graph,
- &dex_compilation_unit,
- &dex_compilation_unit,
- &dex_file,
- compiler_driver,
- compilation_stats_.get(),
- interpreter_metadata,
- dex_cache);
-
VLOG(compiler) << "Building " << pass_observer.GetMethodName();
{
- PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
- if (!builder.BuildGraph(*code_item)) {
- pass_observer.SetGraphInBadState();
- return nullptr;
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScopeCollection handles(soa.Self());
+ // Do not hold `mutator_lock_` between optimizations.
+ ScopedThreadSuspension sts(soa.Self(), kNative);
+
+ {
+ PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
+ HGraphBuilder builder(graph,
+ &dex_compilation_unit,
+ &dex_compilation_unit,
+ &dex_file,
+ compiler_driver,
+ compilation_stats_.get(),
+ interpreter_metadata,
+ dex_cache);
+ GraphAnalysisResult result = builder.BuildGraph(*code_item, &handles);
+ if (result != kAnalysisSuccess) {
+ switch (result) {
+ case kAnalysisInvalidBytecode:
+ break;
+ case kAnalysisFailThrowCatchLoop:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
+ break;
+ case kAnalysisFailAmbiguousArrayOp:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
+ break;
+ case kAnalysisSuccess:
+ UNREACHABLE();
+ }
+ pass_observer.SetGraphInBadState();
+ return nullptr;
+ }
}
- }
- VLOG(compiler) << "Optimizing " << pass_observer.GetMethodName();
+ RunOptimizations(graph,
+ codegen.get(),
+ compiler_driver,
+ compilation_stats_.get(),
+ dex_compilation_unit,
+ &pass_observer,
+ &handles);
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
- ScopedThreadSuspension sts(soa.Self(), kNative);
-
- {
- PassScope scope(SsaBuilder::kSsaBuilderPassName, &pass_observer);
- GraphAnalysisResult result = graph->TryBuildingSsa(&handles);
- if (result != kAnalysisSuccess) {
- switch (result) {
- case kAnalysisFailThrowCatchLoop:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
- break;
- case kAnalysisFailAmbiguousArrayOp:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
- break;
- case kAnalysisSuccess:
- UNREACHABLE();
- }
- pass_observer.SetGraphInBadState();
- return nullptr;
- }
+ codegen->Compile(code_allocator);
+ pass_observer.DumpDisassembly();
}
- RunOptimizations(graph,
- codegen.get(),
- compiler_driver,
- compilation_stats_.get(),
- dex_compilation_unit,
- &pass_observer,
- &handles);
- codegen->Compile(code_allocator);
- pass_observer.DumpDisassembly();
-
if (kArenaAllocatorCountAllocations) {
if (arena->BytesAllocated() > 4 * MB) {
MemStats mem_stats(arena->GetMemStats());
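
With SSA construction folded into HGraphBuilder::BuildGraph(), both the top-level compile path above and the inliner now check a single GraphAnalysisResult, and the new kAnalysisInvalidBytecode value takes over what used to be the boolean builder failure. Callers that do not need per-reason statistics can collapse the check to one comparison, as the inliner hunk earlier in this change does; a condensed form of that pattern:

    // One combined success check after building (invalid bytecode, a throw/catch
    // loop, or an ambiguous float/int array operation all land here).
    if (builder.BuildGraph(*code_item, &handles) != kAnalysisSuccess) {
      return nullptr;
    }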
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 5a910433b4..0c7648edc2 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -64,10 +64,12 @@ LiveInterval* BuildInterval(const size_t ranges[][2],
void RemoveSuspendChecks(HGraph* graph) {
for (HBasicBlock* block : graph->GetBlocks()) {
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
- if (current->IsSuspendCheck()) {
- current->GetBlock()->RemoveInstruction(current);
+ if (block != nullptr) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ if (current->IsSuspendCheck()) {
+ current->GetBlock()->RemoveInstruction(current);
+ }
}
}
}
@@ -83,12 +85,17 @@ inline HGraph* CreateGraph(ArenaAllocator* allocator) {
inline HGraph* CreateCFG(ArenaAllocator* allocator,
const uint16_t* data,
Primitive::Type return_type = Primitive::kPrimInt) {
- HGraph* graph = CreateGraph(allocator);
- HGraphBuilder builder(graph, return_type);
const DexFile::CodeItem* item =
reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- return graph_built ? graph : nullptr;
+ HGraph* graph = CreateGraph(allocator);
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScopeCollection handles(soa.Self());
+ HGraphBuilder builder(graph, return_type);
+ bool graph_built = (builder.BuildGraph(*item, &handles) == kAnalysisSuccess);
+ return graph_built ? graph : nullptr;
+ }
}
// Naive string diff data type.
@@ -114,12 +121,6 @@ inline bool IsRemoved(HInstruction* instruction) {
return instruction->GetBlock() == nullptr;
}
-inline void TransformToSsa(HGraph* graph) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
- EXPECT_EQ(graph->TryBuildingSsa(&handles), kAnalysisSuccess);
-}
-
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
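
CreateCFG() now builds the graph and its SSA form in one step and returns nullptr on failure, which is what lets the per-test TransformToSsa() helper above be deleted. Since the builder path takes ScopedObjectAccess and a handle scope, the helper needs a runtime behind it, which is presumably why PrettyPrinterTest below (and the other converted tests) switch from TEST to TEST_F on CommonCompilerTest. Typical usage after this change, mirroring linearize_test.cc and liveness_test.cc above (data is the CodeItem-encoded bytecode array, as in those tests):

    ArenaPool pool;
    ArenaAllocator allocator(&pool);
    HGraph* graph = CreateCFG(&allocator, data);  // graph comes back already in SSA form
    ASSERT_NE(graph, nullptr);                    // nullptr when BuildGraph() did not succeed
    RemoveSuspendChecks(graph);                   // optional, for tests that depend on ordering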
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index c56100dfa1..2de0c1be72 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -30,17 +30,15 @@ namespace art {
static void TestCode(const uint16_t* data, const char* expected) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
+ HGraph* graph = CreateCFG(&allocator, data);
StringPrettyPrinter printer(graph);
printer.VisitInsertionOrder();
ASSERT_STREQ(expected, printer.str().c_str());
}
-TEST(PrettyPrinterTest, ReturnVoid) {
+class PrettyPrinterTest : public CommonCompilerTest {};
+
+TEST_F(PrettyPrinterTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
@@ -56,7 +54,7 @@ TEST(PrettyPrinterTest, ReturnVoid) {
TestCode(data, expected);
}
-TEST(PrettyPrinterTest, CFG1) {
+TEST_F(PrettyPrinterTest, CFG1) {
const char* expected =
"BasicBlock 0, succ: 1\n"
" 3: SuspendCheck\n"
@@ -76,7 +74,7 @@ TEST(PrettyPrinterTest, CFG1) {
TestCode(data, expected);
}
-TEST(PrettyPrinterTest, CFG2) {
+TEST_F(PrettyPrinterTest, CFG2) {
const char* expected =
"BasicBlock 0, succ: 1\n"
" 4: SuspendCheck\n"
@@ -98,7 +96,7 @@ TEST(PrettyPrinterTest, CFG2) {
TestCode(data, expected);
}
-TEST(PrettyPrinterTest, CFG3) {
+TEST_F(PrettyPrinterTest, CFG3) {
const char* expected =
"BasicBlock 0, succ: 1\n"
" 4: SuspendCheck\n"
@@ -134,16 +132,16 @@ TEST(PrettyPrinterTest, CFG3) {
TestCode(data3, expected);
}
-TEST(PrettyPrinterTest, CFG4) {
+TEST_F(PrettyPrinterTest, CFG4) {
const char* expected =
- "BasicBlock 0, succ: 1\n"
+ "BasicBlock 0, succ: 3\n"
" 3: SuspendCheck\n"
- " 4: Goto 1\n"
- "BasicBlock 1, pred: 0, 1, succ: 1\n"
+ " 4: Goto 3\n"
+ "BasicBlock 1, pred: 3, 1, succ: 1\n"
" 0: SuspendCheck\n"
" 1: Goto 1\n"
- "BasicBlock 2\n"
- " 2: Exit\n";
+ "BasicBlock 3, pred: 0, succ: 1\n"
+ " 5: Goto 1\n";
const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
@@ -157,15 +155,13 @@ TEST(PrettyPrinterTest, CFG4) {
TestCode(data2, expected);
}
-TEST(PrettyPrinterTest, CFG5) {
+TEST_F(PrettyPrinterTest, CFG5) {
const char* expected =
"BasicBlock 0, succ: 1\n"
" 3: SuspendCheck\n"
" 4: Goto 1\n"
- "BasicBlock 1, pred: 0, 2, succ: 3\n"
+ "BasicBlock 1, pred: 0, succ: 3\n"
" 0: ReturnVoid\n"
- "BasicBlock 2, succ: 1\n"
- " 1: Goto 1\n"
"BasicBlock 3, pred: 1\n"
" 2: Exit\n";
@@ -177,25 +173,23 @@ TEST(PrettyPrinterTest, CFG5) {
TestCode(data, expected);
}
-TEST(PrettyPrinterTest, CFG6) {
+TEST_F(PrettyPrinterTest, CFG6) {
const char* expected =
"BasicBlock 0, succ: 1\n"
- " 0: Local [4, 3, 2]\n"
- " 1: IntConstant [2]\n"
+ " 1: IntConstant [5, 5]\n"
" 10: SuspendCheck\n"
" 11: Goto 1\n"
- "BasicBlock 1, pred: 0, succ: 3, 2\n"
- " 2: StoreLocal(0, 1)\n"
- " 3: LoadLocal(0) [5]\n"
- " 4: LoadLocal(0) [5]\n"
- " 5: Equal(3, 4) [6]\n"
+ "BasicBlock 1, pred: 0, succ: 5, 2\n"
+ " 5: Equal(1, 1) [6]\n"
" 6: If(5)\n"
"BasicBlock 2, pred: 1, succ: 3\n"
" 7: Goto 3\n"
- "BasicBlock 3, pred: 1, 2, succ: 4\n"
+ "BasicBlock 3, pred: 5, 2, succ: 4\n"
" 8: ReturnVoid\n"
"BasicBlock 4, pred: 3\n"
- " 9: Exit\n";
+ " 9: Exit\n"
+ "BasicBlock 5, pred: 1, succ: 3\n"
+ " 12: Goto 3\n";
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
@@ -206,26 +200,24 @@ TEST(PrettyPrinterTest, CFG6) {
TestCode(data, expected);
}
-TEST(PrettyPrinterTest, CFG7) {
+TEST_F(PrettyPrinterTest, CFG7) {
const char* expected =
"BasicBlock 0, succ: 1\n"
- " 0: Local [4, 3, 2]\n"
- " 1: IntConstant [2]\n"
+ " 1: IntConstant [5, 5]\n"
" 11: SuspendCheck\n"
" 12: Goto 1\n"
- "BasicBlock 1, pred: 0, succ: 3, 2\n"
- " 2: StoreLocal(0, 1)\n"
- " 3: LoadLocal(0) [5]\n"
- " 4: LoadLocal(0) [5]\n"
- " 5: Equal(3, 4) [6]\n"
+ "BasicBlock 1, pred: 0, succ: 5, 6\n"
+ " 5: Equal(1, 1) [6]\n"
" 6: If(5)\n"
- "BasicBlock 2, pred: 1, 3, succ: 3\n"
+ "BasicBlock 2, pred: 6, 3, succ: 3\n"
" 7: Goto 3\n"
- "BasicBlock 3, pred: 1, 2, succ: 2\n"
+ "BasicBlock 3, pred: 5, 2, succ: 2\n"
" 8: SuspendCheck\n"
" 9: Goto 2\n"
- "BasicBlock 4\n"
- " 10: Exit\n";
+ "BasicBlock 5, pred: 1, succ: 3\n"
+ " 13: Goto 3\n"
+ "BasicBlock 6, pred: 1, succ: 2\n"
+ " 14: Goto 2\n";
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
@@ -236,15 +228,13 @@ TEST(PrettyPrinterTest, CFG7) {
TestCode(data, expected);
}
-TEST(PrettyPrinterTest, IntConstant) {
+TEST_F(PrettyPrinterTest, IntConstant) {
const char* expected =
"BasicBlock 0, succ: 1\n"
- " 0: Local [2]\n"
- " 1: IntConstant [2]\n"
+ " 1: IntConstant\n"
" 5: SuspendCheck\n"
" 6: Goto 1\n"
"BasicBlock 1, pred: 0, succ: 2\n"
- " 2: StoreLocal(0, 1)\n"
" 3: ReturnVoid\n"
"BasicBlock 2, pred: 1\n"
" 4: Exit\n";
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 1224a48fa0..deaa415ed4 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -55,10 +55,12 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
public:
RTPVisitor(HGraph* graph,
HandleCache* handle_cache,
- ArenaVector<HInstruction*>* worklist)
+ ArenaVector<HInstruction*>* worklist,
+ bool is_first_run)
: HGraphDelegateVisitor(graph),
handle_cache_(handle_cache),
- worklist_(worklist) {}
+ worklist_(worklist),
+ is_first_run_(is_first_run) {}
void VisitNewInstance(HNewInstance* new_instance) OVERRIDE;
void VisitLoadClass(HLoadClass* load_class) OVERRIDE;
@@ -86,14 +88,17 @@ class ReferenceTypePropagation::RTPVisitor : public HGraphDelegateVisitor {
private:
HandleCache* handle_cache_;
ArenaVector<HInstruction*>* worklist_;
+ const bool is_first_run_;
};
ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
StackHandleScopeCollection* handles,
+ bool is_first_run,
const char* name)
: HOptimization(graph, name),
handle_cache_(handles),
- worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)) {
+ worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)),
+ is_first_run_(is_first_run) {
}
void ReferenceTypePropagation::ValidateTypes() {
@@ -125,7 +130,7 @@ void ReferenceTypePropagation::ValidateTypes() {
}
void ReferenceTypePropagation::Visit(HInstruction* instruction) {
- RTPVisitor visitor(graph_, &handle_cache_, &worklist_);
+ RTPVisitor visitor(graph_, &handle_cache_, &worklist_, is_first_run_);
instruction->Accept(&visitor);
}
@@ -144,7 +149,7 @@ void ReferenceTypePropagation::Run() {
}
void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) {
- RTPVisitor visitor(graph_, &handle_cache_, &worklist_);
+ RTPVisitor visitor(graph_, &handle_cache_, &worklist_, is_first_run_);
// Handle Phis first as there might be instructions in the same block that depend on them.
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
VisitPhi(it.Current()->AsPhi());
@@ -620,6 +625,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitCheckCast(HCheckCast* check_cast
DCHECK_EQ(bound_type->InputAt(0), check_cast->InputAt(0));
if (class_rti.IsValid()) {
+ DCHECK(is_first_run_);
// This is the first run of RTP and class is resolved.
bound_type->SetUpperBound(class_rti, /* CheckCast succeeds for nulls. */ true);
} else {
@@ -636,6 +642,12 @@ void ReferenceTypePropagation::VisitPhi(HPhi* phi) {
}
if (phi->GetBlock()->IsLoopHeader()) {
+ if (!is_first_run_ && graph_->IsCompilingOsr()) {
+ // Don't update the type of a loop phi when compiling OSR: we may have done
+ // speculative optimizations dominating that phi which do not hold at the
+ // point where the interpreter jumps to that loop header.
+ return;
+ }
ScopedObjectAccess soa(Thread::Current());
// Set the initial type for the phi. Use the non back edge input for reaching
// a fixed point faster.
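The is_first_run_ flag lets one pass distinguish the initial typing performed by the SSA builder from later re-runs, which is what gates both the CheckCast DCHECK and the OSR early-return above. A sketch of the two kinds of call sites, assuming graph and handles are in scope; only the first form (from SsaBuilder::BuildSsa, further down in this patch) appears verbatim here, the second is an assumed later re-run such as after inlining:

  ReferenceTypePropagation(graph, handles, /* is_first_run */ true).Run();   // initial run in the SSA builder
  ReferenceTypePropagation(graph, handles, /* is_first_run */ false).Run();  // assumed re-run, e.g. after inlining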
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index a7f10a65ab..028a6fc514 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -33,6 +33,7 @@ class ReferenceTypePropagation : public HOptimization {
public:
ReferenceTypePropagation(HGraph* graph,
StackHandleScopeCollection* handles,
+ bool is_first_run,
const char* name = kReferenceTypePropagationPassName);
// Visit a single instruction.
@@ -93,6 +94,8 @@ class ReferenceTypePropagation : public HOptimization {
ArenaVector<HInstruction*> worklist_;
+ // Whether this reference type propagation is the first run we are doing.
+ const bool is_first_run_;
static constexpr size_t kDefaultWorklistSize = 8;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 5cd30adb45..b8d76b912e 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -994,10 +994,6 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
return false;
}
- // We use the first use to compare with other intervals. If this interval
- // is used after any active intervals, we will spill this interval.
- size_t first_use = current->FirstUseAfter(current->GetStart());
-
// First set all registers as not being used.
size_t* next_use = registers_array_;
for (size_t i = 0; i < number_of_registers_; ++i) {
@@ -1011,7 +1007,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
if (active->IsFixed()) {
next_use[active->GetRegister()] = current->GetStart();
} else {
- size_t use = active->FirstUseAfter(current->GetStart());
+ size_t use = active->FirstRegisterUseAfter(current->GetStart());
if (use != kNoLifetime) {
next_use[active->GetRegister()] = use;
}
@@ -1052,16 +1048,16 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
DCHECK(current->IsHighInterval());
reg = current->GetRegister();
// When allocating the low part, we made sure the high register was available.
- DCHECK_LT(first_use, next_use[reg]);
+ DCHECK_LT(first_register_use, next_use[reg]);
} else if (current->IsLowInterval()) {
- reg = FindAvailableRegisterPair(next_use, first_use);
+ reg = FindAvailableRegisterPair(next_use, first_register_use);
// We should spill if both registers are not available.
- should_spill = (first_use >= next_use[reg])
- || (first_use >= next_use[GetHighForLowRegister(reg)]);
+ should_spill = (first_register_use >= next_use[reg])
+ || (first_register_use >= next_use[GetHighForLowRegister(reg)]);
} else {
DCHECK(!current->IsHighInterval());
reg = FindAvailableRegister(next_use, current);
- should_spill = (first_use >= next_use[reg]);
+ should_spill = (first_register_use >= next_use[reg]);
}
DCHECK_NE(reg, kNoRegister);
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 572faa841e..a9de7c3e59 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -38,11 +38,7 @@ class RegisterAllocatorTest : public CommonCompilerTest {};
static bool Check(const uint16_t* data) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- builder.BuildGraph(*item);
- TransformToSsa(graph);
+ HGraph* graph = CreateCFG(&allocator, data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -254,15 +250,6 @@ TEST_F(RegisterAllocatorTest, Loop2) {
ASSERT_TRUE(Check(data));
}
-static HGraph* BuildSSAGraph(const uint16_t* data, ArenaAllocator* allocator) {
- HGraph* graph = CreateGraph(allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- builder.BuildGraph(*item);
- TransformToSsa(graph);
- return graph;
-}
-
TEST_F(RegisterAllocatorTest, Loop3) {
/*
* Test the following snippet:
@@ -302,7 +289,7 @@ TEST_F(RegisterAllocatorTest, Loop3) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = BuildSSAGraph(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -336,7 +323,7 @@ TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = BuildSSAGraph(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
@@ -390,7 +377,7 @@ TEST_F(RegisterAllocatorTest, DeadPhi) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = BuildSSAGraph(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
SsaDeadPhiElimination(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
@@ -414,7 +401,7 @@ TEST_F(RegisterAllocatorTest, FreeUntil) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = BuildSSAGraph(data, &allocator);
+ HGraph* graph = CreateCFG(&allocator, data);
SsaDeadPhiElimination(graph).Run();
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 165d09d1a5..43f2499b24 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -430,8 +430,6 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() {
}
for (HNewInstance* new_instance : uninitialized_strings_) {
- DCHECK(new_instance->IsStringAlloc());
-
// Replace NewInstance of String with NullConstant if not used prior to
// calling StringFactory. In case of deoptimization, the interpreter is
// expected to skip null check on the `this` argument of the StringFactory call.
@@ -440,10 +438,26 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() {
new_instance->GetBlock()->RemoveInstruction(new_instance);
// Remove LoadClass if not needed any more.
- HLoadClass* load_class = new_instance->InputAt(0)->AsLoadClass();
+ HInstruction* input = new_instance->InputAt(0);
+ HLoadClass* load_class = nullptr;
+
+ // If the class was not present in the dex cache at the point of building
+ // the graph, the builder inserted an HClinitCheck in between. Since the String
+ // class is always initialized at the point of running Java code, we can remove
+ // that check.
+ if (input->IsClinitCheck()) {
+ load_class = input->InputAt(0)->AsLoadClass();
+ input->ReplaceWith(load_class);
+ input->GetBlock()->RemoveInstruction(input);
+ } else {
+ load_class = input->AsLoadClass();
+ DCHECK(new_instance->IsStringAlloc());
+ DCHECK(!load_class->NeedsAccessCheck()) << "String class is always accessible";
+ }
DCHECK(load_class != nullptr);
- DCHECK(!load_class->NeedsAccessCheck()) << "String class is always accessible";
if (!load_class->HasUses()) {
+ // Even if the HLoadClass needs an access check, we can remove it, as we know the
+ // String class does not need it.
load_class->GetBlock()->RemoveInstruction(load_class);
}
}
@@ -451,6 +465,8 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() {
}
GraphAnalysisResult SsaBuilder::BuildSsa() {
+ DCHECK(!GetGraph()->IsInSsaForm());
+
// 1) Visit in reverse post order. We need to have all predecessors of a block
// visited (with the exception of loops) in order to create the right environment
// for that block. For loops, we create phis whose inputs will be set in 2).
@@ -483,7 +499,7 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
// 6) Compute type of reference type instructions. The pass assumes that
// NullConstant has been fixed up.
- ReferenceTypePropagation(GetGraph(), handles_).Run();
+ ReferenceTypePropagation(GetGraph(), handles_, /* is_first_run */ true).Run();
// 7) Step 1) duplicated ArrayGet instructions with ambiguous type (int/float
// or long/double) and marked ArraySets with ambiguous input type. Now that RTP
@@ -533,6 +549,7 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
}
}
+ GetGraph()->SetInSsaForm();
return kAnalysisSuccess;
}
@@ -899,11 +916,6 @@ void SsaBuilder::VisitInstruction(HInstruction* instruction) {
}
}
-void SsaBuilder::VisitTemporary(HTemporary* temp) {
- // Temporaries are only used by the baseline register allocator.
- temp->GetBlock()->RemoveInstruction(temp);
-}
-
void SsaBuilder::VisitArrayGet(HArrayGet* aget) {
Primitive::Type type = aget->GetType();
DCHECK(!Primitive::IsFloatingPointType(type));
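The String-allocation cleanup above now has to look through an HClinitCheck that the builder inserts when the String class was not found in the dex cache. A minimal sketch of that unwrap step as a standalone helper; the helper name is an assumption, the body mirrors the added branch:

  static HLoadClass* UnwrapStringLoadClass(HInstruction* input) {
    if (input->IsClinitCheck()) {
      // String is always initialized by the time Java code runs, so the check can go.
      HLoadClass* load_class = input->InputAt(0)->AsLoadClass();
      input->ReplaceWith(load_class);
      input->GetBlock()->RemoveInstruction(input);
      return load_class;
    }
    return input->AsLoadClass();
  }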
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index ccef8ea380..2dae9c2de0 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -75,13 +75,10 @@ class SsaBuilder : public HGraphVisitor {
void VisitLoadLocal(HLoadLocal* load) OVERRIDE;
void VisitStoreLocal(HStoreLocal* store) OVERRIDE;
void VisitInstruction(HInstruction* instruction) OVERRIDE;
- void VisitTemporary(HTemporary* instruction) OVERRIDE;
void VisitArrayGet(HArrayGet* aget) OVERRIDE;
void VisitArraySet(HArraySet* aset) OVERRIDE;
void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
- static constexpr const char* kSsaBuilderPassName = "ssa_builder";
-
private:
void SetLoopHeaderPhiInputs();
void FixEnvironmentPhis();
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 1dd35080a6..83e9dacb1a 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -226,7 +226,7 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
// The only instructions which may not be recorded in the environments
// are constants created by the SSA builder as typed equivalents of
// untyped constants from the bytecode, or phis with only such constants
- // as inputs (verified by SSAChecker). Their raw binary value must
+ // as inputs (verified by GraphChecker). Their raw binary value must
// therefore be the same and we only need to keep alive one.
} else {
size_t phi_input_index = successor->GetPredecessorIndexOf(block);
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index d2885a8fd7..a6880921c5 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -79,13 +79,7 @@ static void ReNumberInstructions(HGraph* graph) {
static void TestCode(const uint16_t* data, const char* expected) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
-
- TransformToSsa(graph);
+ HGraph* graph = CreateCFG(&allocator, data);
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
index b6c704c1b1..15cd4e8a08 100644
--- a/compiler/optimizing/suspend_check_test.cc
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -18,6 +18,7 @@
#include "dex_instruction.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
+#include "pretty_printer.h"
#include "gtest/gtest.h"
@@ -30,20 +31,17 @@ namespace art {
static void TestCode(const uint16_t* data) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
-
- HBasicBlock* first_block = graph->GetEntryBlock()->GetSuccessors()[0];
- HInstruction* first_instruction = first_block->GetFirstInstruction();
- // Account for some tests having a store local as first instruction.
- ASSERT_TRUE(first_instruction->IsSuspendCheck()
- || first_instruction->GetNext()->IsSuspendCheck());
+ HGraph* graph = CreateCFG(&allocator, data);
+ HBasicBlock* first_block = graph->GetEntryBlock()->GetSingleSuccessor();
+ HBasicBlock* loop_header = first_block->GetSingleSuccessor();
+ ASSERT_TRUE(loop_header->IsLoopHeader());
+ ASSERT_EQ(loop_header->GetLoopInformation()->GetPreHeader(), first_block);
+ ASSERT_TRUE(loop_header->GetFirstInstruction()->IsSuspendCheck());
}
-TEST(CodegenTest, CFG1) {
+class SuspendCheckTest : public CommonCompilerTest {};
+
+TEST_F(SuspendCheckTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
Instruction::GOTO | 0xFF00);
@@ -51,14 +49,14 @@ TEST(CodegenTest, CFG1) {
TestCode(data);
}
-TEST(CodegenTest, CFG2) {
+TEST_F(SuspendCheckTest, CFG2) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 0, 0);
TestCode(data);
}
-TEST(CodegenTest, CFG3) {
+TEST_F(SuspendCheckTest, CFG3) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 0xFFFF,
@@ -67,7 +65,7 @@ TEST(CodegenTest, CFG3) {
TestCode(data);
}
-TEST(CodegenTest, CFG4) {
+TEST_F(SuspendCheckTest, CFG4) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_NE, 0xFFFF,
@@ -76,7 +74,7 @@ TEST(CodegenTest, CFG4) {
TestCode(data);
}
-TEST(CodegenTest, CFG5) {
+TEST_F(SuspendCheckTest, CFG5) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQZ, 0xFFFF,
@@ -85,7 +83,7 @@ TEST(CodegenTest, CFG5) {
TestCode(data);
}
-TEST(CodegenTest, CFG6) {
+TEST_F(SuspendCheckTest, CFG6) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_NEZ, 0xFFFF,
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 7138a46890..3efef70f77 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -326,6 +326,14 @@ void X86Assembler::cmovl(Condition condition, Register dst, Register src) {
}
+void X86Assembler::cmovl(Condition condition, Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x40 + condition);
+ EmitOperand(dst, src);
+}
+
+
void X86Assembler::setb(Condition condition, Register dst) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 759a41e80e..00ff7bdbbd 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -363,6 +363,7 @@ class X86Assembler FINAL : public Assembler {
void leal(Register dst, const Address& src);
void cmovl(Condition condition, Register dst, Register src);
+ void cmovl(Condition condition, Register dst, const Address& src);
void setb(Condition condition, Register dst);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 0fd098227a..d0d51473fe 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -332,6 +332,21 @@ TEST_F(AssemblerX86Test, UComisdAddr) {
}
+TEST_F(AssemblerX86Test, CmovlAddress) {
+ GetAssembler()->cmovl(x86::kEqual, x86::Register(x86::EAX), x86::Address(
+ x86::Register(x86::EDI), x86::Register(x86::EBX), x86::TIMES_4, 12));
+ GetAssembler()->cmovl(x86::kNotEqual, x86::Register(x86::EDI), x86::Address(
+ x86::Register(x86::ESI), x86::Register(x86::EBX), x86::TIMES_4, 12));
+ GetAssembler()->cmovl(x86::kEqual, x86::Register(x86::EDI), x86::Address(
+ x86::Register(x86::EDI), x86::Register(x86::EAX), x86::TIMES_4, 12));
+ const char* expected =
+ "cmovzl 0xc(%EDI,%EBX,4), %eax\n"
+ "cmovnzl 0xc(%ESI,%EBX,4), %edi\n"
+ "cmovzl 0xc(%EDI,%EAX,4), %edi\n";
+
+ DriverStr(expected, "cmovl_address");
+}
+
/////////////////
// Near labels //
/////////////////
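The new register-from-memory cmovl overload just emits the two-byte opcode 0x0F, 0x40 + condition followed by a regular ModRM/SIB operand, so the expected strings above can be verified by hand. A worked encoding of the first test case, computed by hand here (not copied from assembler output) and assuming kEqual encodes as condition 0x4:

  cmovl(kEqual, EAX, Address(EDI, EBX, TIMES_4, 12))    // disassembles as "cmovzl 0xc(%EDI,%EBX,4), %eax"
    0F 44   opcode 0x0F, 0x40 + kEqual(0x4)
    44      ModRM: mod=01 (disp8), reg=EAX(000), rm=100 (SIB follows)
    9F      SIB:   scale=x4(10), index=EBX(011), base=EDI(111)
    0C      disp8: 12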
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 10f5a005e1..d86ad1be5f 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -223,6 +223,19 @@ void X86_64Assembler::cmov(Condition c, CpuRegister dst, CpuRegister src, bool i
}
+void X86_64Assembler::cmov(Condition c, CpuRegister dst, const Address& src, bool is64bit) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ if (is64bit) {
+ EmitRex64(dst, src);
+ } else {
+ EmitOptionalRex32(dst, src);
+ }
+ EmitUint8(0x0F);
+ EmitUint8(0x40 + c);
+ EmitOperand(dst.LowBits(), src);
+}
+
+
void X86_64Assembler::movzxb(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalByteRegNormalizingRex32(dst, src);
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 6f0847eb61..f00cb12413 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -366,6 +366,7 @@ class X86_64Assembler FINAL : public Assembler {
void cmov(Condition c, CpuRegister dst, CpuRegister src); // This is the 64b version.
void cmov(Condition c, CpuRegister dst, CpuRegister src, bool is64bit);
+ void cmov(Condition c, CpuRegister dst, const Address& src, bool is64bit);
void movzxb(CpuRegister dst, CpuRegister src);
void movzxb(CpuRegister dst, const Address& src);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 8a87fca96a..4f65709810 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -1371,6 +1371,37 @@ TEST_F(AssemblerX86_64Test, PopcntqAddress) {
DriverStr(expected, "popcntq_address");
}
+TEST_F(AssemblerX86_64Test, CmovlAddress) {
+ GetAssembler()->cmov(x86_64::kEqual, x86_64::CpuRegister(x86_64::R10), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12), false);
+ GetAssembler()->cmov(x86_64::kNotEqual, x86_64::CpuRegister(x86_64::RDI), x86_64::Address(
+ x86_64::CpuRegister(x86_64::R10), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12), false);
+ GetAssembler()->cmov(x86_64::kEqual, x86_64::CpuRegister(x86_64::RDI), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), false);
+ const char* expected =
+ "cmovzl 0xc(%RDI,%RBX,4), %R10d\n"
+ "cmovnzl 0xc(%R10,%RBX,4), %edi\n"
+ "cmovzl 0xc(%RDI,%R9,4), %edi\n";
+
+ DriverStr(expected, "cmovl_address");
+}
+
+TEST_F(AssemblerX86_64Test, CmovqAddress) {
+ GetAssembler()->cmov(x86_64::kEqual, x86_64::CpuRegister(x86_64::R10), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12), true);
+ GetAssembler()->cmov(x86_64::kNotEqual, x86_64::CpuRegister(x86_64::RDI), x86_64::Address(
+ x86_64::CpuRegister(x86_64::R10), x86_64::CpuRegister(x86_64::RBX), x86_64::TIMES_4, 12), true);
+ GetAssembler()->cmov(x86_64::kEqual, x86_64::CpuRegister(x86_64::RDI), x86_64::Address(
+ x86_64::CpuRegister(x86_64::RDI), x86_64::CpuRegister(x86_64::R9), x86_64::TIMES_4, 12), true);
+ const char* expected =
+ "cmovzq 0xc(%RDI,%RBX,4), %R10\n"
+ "cmovnzq 0xc(%R10,%RBX,4), %rdi\n"
+ "cmovzq 0xc(%RDI,%R9,4), %rdi\n";
+
+ DriverStr(expected, "cmovq_address");
+}
+
+
/////////////////
// Near labels //
/////////////////
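The x86-64 version only differs by the REX prefix: is64bit selects EmitRex64 (REX.W set), while the 32-bit form zero-extends into the destination register. A worked encoding of the first CmovqAddress case, again a hand check under the same assumption that kEqual is condition 0x4:

  cmov(kEqual, R10, Address(RDI, RBX, TIMES_4, 12), /* is64bit */ true)   // "cmovzq 0xc(%RDI,%RBX,4), %R10"
    4C      REX: W=1 (64-bit operand), R=1 (R10 needs the reg-field extension), X=0, B=0
    0F 44   opcode 0x0F, 0x40 + kEqual(0x4)
    54      ModRM: mod=01 (disp8), reg=R10&7(010), rm=100 (SIB follows)
    9F      SIB:   scale=x4, index=RBX, base=RDI
    0C      disp8: 12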
diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc
index 5e2cf6b81d..0e709eb419 100644
--- a/disassembler/disassembler_arm.cc
+++ b/disassembler/disassembler_arm.cc
@@ -1152,8 +1152,10 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
args << Rd << ", #" << imm16;
break;
}
- case 0x16: {
+ case 0x16: case 0x14: case 0x1C: {
// BFI Rd, Rn, #lsb, #width - 111 10 0 11 011 0 nnnn 0 iii dddd ii 0 iiiii
+ // SBFX Rd, Rn, #lsb, #width - 111 10 0 11 010 0 nnnn 0 iii dddd ii 0 iiiii
+ // UBFX Rd, Rn, #lsb, #width - 111 10 0 11 110 0 nnnn 0 iii dddd ii 0 iiiii
ArmRegister Rd(instr, 8);
ArmRegister Rn(instr, 16);
uint32_t msb = instr & 0x1F;
@@ -1161,12 +1163,21 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr)
uint32_t imm3 = (instr >> 12) & 0x7;
uint32_t lsb = (imm3 << 2) | imm2;
uint32_t width = msb - lsb + 1;
- if (Rn.r != 0xF) {
- opcode << "bfi";
- args << Rd << ", " << Rn << ", #" << lsb << ", #" << width;
+ if (op3 == 0x16) {
+ if (Rn.r != 0xF) {
+ opcode << "bfi";
+ args << Rd << ", " << Rn << ", #" << lsb << ", #" << width;
+ } else {
+ opcode << "bfc";
+ args << Rd << ", #" << lsb << ", #" << width;
+ }
} else {
- opcode << "bfc";
- args << Rd << ", #" << lsb << ", #" << width;
+ opcode << ((op3 & 0x8) != 0u ? "ubfx" : "sbfx");
+ args << Rd << ", " << Rn << ", #" << lsb << ", #" << width;
+ if (Rd.r == 13 || Rd.r == 15 || Rn.r == 13 || Rn.r == 15 ||
+ (instr & 0x04000020) != 0u) {
+ args << " (UNPREDICTABLE)";
+ }
}
break;
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 1668dc5f25..1d80bda258 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -150,30 +150,17 @@ static bool FinishFile(File* file, bool close) {
}
}
-bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t delta,
- File* output_oat, File* output_image, InstructionSet isa,
- TimingLogger* timings,
- bool output_oat_opened_from_fd ATTRIBUTE_UNUSED,
- bool new_oat_out) {
+bool PatchOat::Patch(const std::string& image_location,
+ off_t delta,
+ const std::string& output_directory,
+ InstructionSet isa,
+ TimingLogger* timings) {
CHECK(Runtime::Current() == nullptr);
- CHECK(output_image != nullptr);
- CHECK_GE(output_image->Fd(), 0);
- CHECK(input_oat != nullptr);
- CHECK(output_oat != nullptr);
- CHECK_GE(input_oat->Fd(), 0);
- CHECK_GE(output_oat->Fd(), 0);
CHECK(!image_location.empty()) << "image file must have a filename.";
TimingLogger::ScopedTiming t("Runtime Setup", timings);
- if (isa == kNone) {
- Elf32_Ehdr elf_hdr;
- if (sizeof(elf_hdr) != input_oat->Read(reinterpret_cast<char*>(&elf_hdr), sizeof(elf_hdr), 0)) {
- LOG(ERROR) << "unable to read elf header";
- return false;
- }
- isa = GetInstructionSetFromELF(elf_hdr.e_machine, elf_hdr.e_flags);
- }
+ CHECK_NE(isa, kNone);
const char* isa_name = GetInstructionSetString(isa);
// Set up the runtime
@@ -193,8 +180,6 @@ bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t d
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
ScopedObjectAccess soa(Thread::Current());
- std::string output_directory =
- output_image->GetPath().substr(0, output_image->GetPath().find_last_of("/"));
t.NewTiming("Image and oat Patching setup");
std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
std::map<gc::space::ImageSpace*, std::unique_ptr<File>> space_to_file_map;
@@ -325,6 +310,7 @@ bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t d
std::string output_image_filename = output_directory +
(StartsWith(converted_image_filename, "/") ? "" : "/") +
converted_image_filename;
+ bool new_oat_out;
std::unique_ptr<File>
output_image_file(CreateOrOpen(output_image_filename.c_str(), &new_oat_out));
if (output_image_file.get() == nullptr) {
@@ -932,21 +918,9 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --output-image-file=<file.art>: Specifies the exact file to write the patched");
UsageError(" image file to.");
UsageError("");
- UsageError(" --output-image-fd=<file-descriptor>: Specifies the file-descriptor to write the");
- UsageError(" the patched image file to.");
- UsageError("");
- UsageError(" --orig-base-offset=<original-base-offset>: Specify the base offset the input file");
- UsageError(" was compiled with. This is needed if one is specifying a --base-offset");
- UsageError("");
- UsageError(" --base-offset=<new-base-offset>: Specify the base offset we will repatch the");
- UsageError(" given files to use. This requires that --orig-base-offset is also given.");
- UsageError("");
UsageError(" --base-offset-delta=<delta>: Specify the amount to change the old base-offset by.");
UsageError(" This value may be negative.");
UsageError("");
- UsageError(" --patched-image-file=<file.art>: Relocate the oat file to be the same as the");
- UsageError(" given image file.");
- UsageError("");
UsageError(" --patched-image-location=<file.art>: Relocate the oat file to be the same as the");
UsageError(" image at the given location. If used one must also specify the");
UsageError(" --instruction-set flag. It will search for this image in the same way that");
@@ -992,207 +966,75 @@ static bool ReadBaseDelta(const char* name, off_t* delta, std::string* error_msg
return true;
}
-static int patchoat(int argc, char **argv) {
- InitLogging(argv);
- MemMap::Init();
- const bool debug = kIsDebugBuild;
- orig_argc = argc;
- orig_argv = argv;
- TimingLogger timings("patcher", false, false);
+static int patchoat_image(TimingLogger& timings,
+ InstructionSet isa,
+ const std::string& input_image_location,
+ const std::string& output_image_filename,
+ off_t base_delta,
+ bool base_delta_set,
+ bool debug) {
+ CHECK(!input_image_location.empty());
+ if (output_image_filename.empty()) {
+ Usage("Image patching requires --output-image-file");
+ }
- InitLogging(argv);
+ if (!base_delta_set) {
+ Usage("Must supply a desired new offset or delta.");
+ }
- // Skip over the command name.
- argv++;
- argc--;
+ if (!IsAligned<kPageSize>(base_delta)) {
+ Usage("Base offset/delta must be aligned to a pagesize (0x%08x) boundary.", kPageSize);
+ }
- if (argc == 0) {
- Usage("No arguments specified");
+ if (debug) {
+ LOG(INFO) << "moving offset by " << base_delta
+ << " (0x" << std::hex << base_delta << ") bytes or "
+ << std::dec << (base_delta/kPageSize) << " pages.";
}
- timings.StartTiming("Patchoat");
+ TimingLogger::ScopedTiming pt("patch image and oat", &timings);
- // cmd line args
- bool isa_set = false;
- InstructionSet isa = kNone;
- std::string input_oat_filename;
- std::string input_oat_location;
- int input_oat_fd = -1;
- bool have_input_oat = false;
- std::string input_image_location;
- std::string output_oat_filename;
- int output_oat_fd = -1;
- bool have_output_oat = false;
- std::string output_image_filename;
- int output_image_fd = -1;
- bool have_output_image = false;
- uintptr_t base_offset = 0;
- bool base_offset_set = false;
- uintptr_t orig_base_offset = 0;
- bool orig_base_offset_set = false;
- off_t base_delta = 0;
- bool base_delta_set = false;
- bool match_delta = false;
- std::string patched_image_filename;
- std::string patched_image_location;
- bool dump_timings = kIsDebugBuild;
- bool lock_output = true;
+ std::string output_directory =
+ output_image_filename.substr(0, output_image_filename.find_last_of("/"));
+ bool ret = PatchOat::Patch(input_image_location, base_delta, output_directory, isa, &timings);
- for (int i = 0; i < argc; ++i) {
- const StringPiece option(argv[i]);
- const bool log_options = false;
- if (log_options) {
- LOG(INFO) << "patchoat: option[" << i << "]=" << argv[i];
- }
- if (option.starts_with("--instruction-set=")) {
- isa_set = true;
- const char* isa_str = option.substr(strlen("--instruction-set=")).data();
- isa = GetInstructionSetFromString(isa_str);
- if (isa == kNone) {
- Usage("Unknown or invalid instruction set %s", isa_str);
- }
- } else if (option.starts_with("--input-oat-location=")) {
- if (have_input_oat) {
- Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used.");
- }
- have_input_oat = true;
- input_oat_location = option.substr(strlen("--input-oat-location=")).data();
- } else if (option.starts_with("--input-oat-file=")) {
- if (have_input_oat) {
- Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used.");
- }
- have_input_oat = true;
- input_oat_filename = option.substr(strlen("--input-oat-file=")).data();
- } else if (option.starts_with("--input-oat-fd=")) {
- if (have_input_oat) {
- Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used.");
- }
- have_input_oat = true;
- const char* oat_fd_str = option.substr(strlen("--input-oat-fd=")).data();
- if (!ParseInt(oat_fd_str, &input_oat_fd)) {
- Usage("Failed to parse --input-oat-fd argument '%s' as an integer", oat_fd_str);
- }
- if (input_oat_fd < 0) {
- Usage("--input-oat-fd pass a negative value %d", input_oat_fd);
- }
- } else if (option.starts_with("--input-image-location=")) {
- input_image_location = option.substr(strlen("--input-image-location=")).data();
- } else if (option.starts_with("--output-oat-file=")) {
- if (have_output_oat) {
- Usage("Only one of --output-oat-file, and --output-oat-fd may be used.");
- }
- have_output_oat = true;
- output_oat_filename = option.substr(strlen("--output-oat-file=")).data();
- } else if (option.starts_with("--output-oat-fd=")) {
- if (have_output_oat) {
- Usage("Only one of --output-oat-file, --output-oat-fd may be used.");
- }
- have_output_oat = true;
- const char* oat_fd_str = option.substr(strlen("--output-oat-fd=")).data();
- if (!ParseInt(oat_fd_str, &output_oat_fd)) {
- Usage("Failed to parse --output-oat-fd argument '%s' as an integer", oat_fd_str);
- }
- if (output_oat_fd < 0) {
- Usage("--output-oat-fd pass a negative value %d", output_oat_fd);
- }
- } else if (option.starts_with("--output-image-file=")) {
- if (have_output_image) {
- Usage("Only one of --output-image-file, and --output-image-fd may be used.");
- }
- have_output_image = true;
- output_image_filename = option.substr(strlen("--output-image-file=")).data();
- } else if (option.starts_with("--output-image-fd=")) {
- if (have_output_image) {
- Usage("Only one of --output-image-file, and --output-image-fd may be used.");
- }
- have_output_image = true;
- const char* image_fd_str = option.substr(strlen("--output-image-fd=")).data();
- if (!ParseInt(image_fd_str, &output_image_fd)) {
- Usage("Failed to parse --output-image-fd argument '%s' as an integer", image_fd_str);
- }
- if (output_image_fd < 0) {
- Usage("--output-image-fd pass a negative value %d", output_image_fd);
- }
- } else if (option.starts_with("--orig-base-offset=")) {
- const char* orig_base_offset_str = option.substr(strlen("--orig-base-offset=")).data();
- orig_base_offset_set = true;
- if (!ParseUint(orig_base_offset_str, &orig_base_offset)) {
- Usage("Failed to parse --orig-base-offset argument '%s' as an uintptr_t",
- orig_base_offset_str);
- }
- } else if (option.starts_with("--base-offset=")) {
- const char* base_offset_str = option.substr(strlen("--base-offset=")).data();
- base_offset_set = true;
- if (!ParseUint(base_offset_str, &base_offset)) {
- Usage("Failed to parse --base-offset argument '%s' as an uintptr_t", base_offset_str);
- }
- } else if (option.starts_with("--base-offset-delta=")) {
- const char* base_delta_str = option.substr(strlen("--base-offset-delta=")).data();
- base_delta_set = true;
- if (!ParseInt(base_delta_str, &base_delta)) {
- Usage("Failed to parse --base-offset-delta argument '%s' as an off_t", base_delta_str);
- }
- } else if (option.starts_with("--patched-image-location=")) {
- patched_image_location = option.substr(strlen("--patched-image-location=")).data();
- } else if (option.starts_with("--patched-image-file=")) {
- patched_image_filename = option.substr(strlen("--patched-image-file=")).data();
- } else if (option == "--lock-output") {
- lock_output = true;
- } else if (option == "--no-lock-output") {
- lock_output = false;
- } else if (option == "--dump-timings") {
- dump_timings = true;
- } else if (option == "--no-dump-timings") {
- dump_timings = false;
- } else {
- Usage("Unknown argument %s", option.data());
- }
+ if (kIsDebugBuild) {
+ LOG(INFO) << "Exiting with return ... " << ret;
}
+ return ret ? EXIT_SUCCESS : EXIT_FAILURE;
+}
+static int patchoat_oat(TimingLogger& timings,
+ InstructionSet isa,
+ const std::string& patched_image_location,
+ off_t base_delta,
+ bool base_delta_set,
+ int input_oat_fd,
+ const std::string& input_oat_location,
+ std::string input_oat_filename,
+ bool have_input_oat,
+ int output_oat_fd,
+ std::string output_oat_filename,
+ bool have_output_oat,
+ bool lock_output,
+ bool debug) {
{
// Only 1 of these may be set.
uint32_t cnt = 0;
cnt += (base_delta_set) ? 1 : 0;
- cnt += (base_offset_set && orig_base_offset_set) ? 1 : 0;
- cnt += (!patched_image_filename.empty()) ? 1 : 0;
cnt += (!patched_image_location.empty()) ? 1 : 0;
if (cnt > 1) {
- Usage("Only one of --base-offset/--orig-base-offset, --base-offset-delta, "
- "--patched-image-filename or --patched-image-location may be used.");
+ Usage("Only one of --base-offset-delta or --patched-image-location may be used.");
} else if (cnt == 0) {
- Usage("Must specify --base-offset-delta, --base-offset and --orig-base-offset, "
- "--patched-image-location or --patched-image-file");
+ Usage("Must specify --base-offset-delta or --patched-image-location.");
}
}
- if (have_input_oat != have_output_oat) {
- Usage("Either both input and output oat must be supplied or niether must be.");
- }
-
- if ((!input_image_location.empty()) != have_output_image) {
- Usage("Either both input and output image must be supplied or niether must be.");
- }
-
- // We know we have both the input and output so rename for clarity.
- bool have_image_files = have_output_image;
- bool have_oat_files = have_output_oat;
-
- if (!have_oat_files) {
- if (have_image_files) {
- Usage("Cannot patch an image file without an oat file");
- } else {
- Usage("Must be patching either an oat file or an image file with an oat file.");
- }
- }
-
- if (!have_oat_files && !isa_set) {
- Usage("Must include ISA if patching an image file without an oat file.");
+ if (!have_input_oat || !have_output_oat) {
+ Usage("Both input and output oat must be supplied to patch an app odex.");
}
if (!input_oat_location.empty()) {
- if (!isa_set) {
- Usage("specifying a location requires specifying an instruction set");
- }
if (!LocationToFilename(input_oat_location, isa, &input_oat_filename)) {
Usage("Unable to find filename for input oat location %s", input_oat_location.c_str());
}
@@ -1200,10 +1042,9 @@ static int patchoat(int argc, char **argv) {
LOG(INFO) << "Using input-oat-file " << input_oat_filename;
}
}
+
+ bool match_delta = false;
if (!patched_image_location.empty()) {
- if (!isa_set) {
- Usage("specifying a location requires specifying an instruction set");
- }
std::string system_filename;
bool has_system = false;
std::string cache_filename;
@@ -1216,11 +1057,12 @@ static int patchoat(int argc, char **argv) {
&is_global_cache)) {
Usage("Unable to determine image file for location %s", patched_image_location.c_str());
}
+ std::string patched_image_filename;
if (has_cache) {
patched_image_filename = cache_filename;
} else if (has_system) {
LOG(WARNING) << "Only image file found was in /system for image location "
- << patched_image_location;
+ << patched_image_location;
patched_image_filename = system_filename;
} else {
Usage("Unable to determine image file for location %s", patched_image_location.c_str());
@@ -1228,28 +1070,12 @@ static int patchoat(int argc, char **argv) {
if (debug) {
LOG(INFO) << "Using patched-image-file " << patched_image_filename;
}
- }
- if (!base_delta_set) {
- if (orig_base_offset_set && base_offset_set) {
- base_delta_set = true;
- base_delta = base_offset - orig_base_offset;
- } else if (!patched_image_filename.empty()) {
- if (have_image_files) {
- Usage("--patched-image-location should not be used when patching other images");
- }
- base_delta_set = true;
- match_delta = true;
- std::string error_msg;
- if (!ReadBaseDelta(patched_image_filename.c_str(), &base_delta, &error_msg)) {
- Usage(error_msg.c_str(), patched_image_filename.c_str());
- }
- } else {
- if (base_offset_set) {
- Usage("Unable to determine original base offset.");
- } else {
- Usage("Must supply a desired new offset or delta.");
- }
+ base_delta_set = true;
+ match_delta = true;
+ std::string error_msg;
+ if (!ReadBaseDelta(patched_image_filename.c_str(), &base_delta, &error_msg)) {
+ Usage(error_msg.c_str(), patched_image_filename.c_str());
}
}
@@ -1258,88 +1084,59 @@ static int patchoat(int argc, char **argv) {
}
// Do we need to cleanup output files if we fail?
- bool new_image_out = false;
bool new_oat_out = false;
std::unique_ptr<File> input_oat;
std::unique_ptr<File> output_oat;
- std::unique_ptr<File> output_image;
-
- if (have_image_files) {
- CHECK(!input_image_location.empty());
- if (output_image_fd != -1) {
- if (output_image_filename.empty()) {
- output_image_filename = "output-image-file";
- }
- output_image.reset(new File(output_image_fd, output_image_filename, true));
- } else {
- CHECK(!output_image_filename.empty());
- output_image.reset(CreateOrOpen(output_image_filename.c_str(), &new_image_out));
+ if (input_oat_fd != -1) {
+ if (input_oat_filename.empty()) {
+ input_oat_filename = "input-oat-file";
+ }
+ input_oat.reset(new File(input_oat_fd, input_oat_filename, false));
+ if (input_oat_fd == output_oat_fd) {
+ input_oat.get()->DisableAutoClose();
+ }
+ if (input_oat == nullptr) {
+ // Unlikely, but ensure exhaustive logging in non-0 exit code case
+ LOG(ERROR) << "Failed to open input oat file by its FD" << input_oat_fd;
}
} else {
- CHECK(output_image_filename.empty() && output_image_fd == -1 && input_image_location.empty());
+ CHECK(!input_oat_filename.empty());
+ input_oat.reset(OS::OpenFileForReading(input_oat_filename.c_str()));
+ if (input_oat == nullptr) {
+ int err = errno;
+ LOG(ERROR) << "Failed to open input oat file " << input_oat_filename
+ << ": " << strerror(err) << "(" << err << ")";
+ }
}
- if (have_oat_files) {
- if (input_oat_fd != -1) {
- if (input_oat_filename.empty()) {
- input_oat_filename = "input-oat-file";
- }
- input_oat.reset(new File(input_oat_fd, input_oat_filename, false));
- if (input_oat_fd == output_oat_fd) {
- input_oat.get()->DisableAutoClose();
- }
- if (input_oat == nullptr) {
- // Unlikely, but ensure exhaustive logging in non-0 exit code case
- LOG(ERROR) << "Failed to open input oat file by its FD" << input_oat_fd;
- }
- } else {
- CHECK(!input_oat_filename.empty());
- input_oat.reset(OS::OpenFileForReading(input_oat_filename.c_str()));
- if (input_oat == nullptr) {
- int err = errno;
- LOG(ERROR) << "Failed to open input oat file " << input_oat_filename
- << ": " << strerror(err) << "(" << err << ")";
- }
+ if (output_oat_fd != -1) {
+ if (output_oat_filename.empty()) {
+ output_oat_filename = "output-oat-file";
}
-
- if (output_oat_fd != -1) {
- if (output_oat_filename.empty()) {
- output_oat_filename = "output-oat-file";
- }
- output_oat.reset(new File(output_oat_fd, output_oat_filename, true));
- if (output_oat == nullptr) {
- // Unlikely, but ensure exhaustive logging in non-0 exit code case
- LOG(ERROR) << "Failed to open output oat file by its FD" << output_oat_fd;
- }
- } else {
- CHECK(!output_oat_filename.empty());
- output_oat.reset(CreateOrOpen(output_oat_filename.c_str(), &new_oat_out));
- if (output_oat == nullptr) {
- int err = errno;
- LOG(ERROR) << "Failed to open output oat file " << output_oat_filename
- << ": " << strerror(err) << "(" << err << ")";
- }
+ output_oat.reset(new File(output_oat_fd, output_oat_filename, true));
+ if (output_oat == nullptr) {
+ // Unlikely, but ensure exhaustive logging in non-0 exit code case
+ LOG(ERROR) << "Failed to open output oat file by its FD" << output_oat_fd;
+ }
+ } else {
+ CHECK(!output_oat_filename.empty());
+ output_oat.reset(CreateOrOpen(output_oat_filename.c_str(), &new_oat_out));
+ if (output_oat == nullptr) {
+ int err = errno;
+ LOG(ERROR) << "Failed to open output oat file " << output_oat_filename
+ << ": " << strerror(err) << "(" << err << ")";
}
}
// TODO: get rid of this.
- auto cleanup = [&output_image_filename, &output_oat_filename,
- &new_oat_out, &new_image_out, &timings, &dump_timings](bool success) {
- timings.EndTiming();
+ auto cleanup = [&output_oat_filename, &new_oat_out](bool success) {
if (!success) {
if (new_oat_out) {
CHECK(!output_oat_filename.empty());
TEMP_FAILURE_RETRY(unlink(output_oat_filename.c_str()));
}
- if (new_image_out) {
- CHECK(!output_image_filename.empty());
- TEMP_FAILURE_RETRY(unlink(output_image_filename.c_str()));
- }
- }
- if (dump_timings) {
- LOG(INFO) << Dumpable<TimingLogger>(timings);
}
if (kIsDebugBuild) {
@@ -1347,18 +1144,13 @@ static int patchoat(int argc, char **argv) {
}
};
- if (have_oat_files && (input_oat.get() == nullptr || output_oat.get() == nullptr)) {
+ if (input_oat.get() == nullptr || output_oat.get() == nullptr) {
LOG(ERROR) << "Failed to open input/output oat files";
cleanup(false);
return EXIT_FAILURE;
- } else if (have_image_files && output_image.get() == nullptr) {
- LOG(ERROR) << "Failed to open output image file";
- cleanup(false);
- return EXIT_FAILURE;
}
if (match_delta) {
- CHECK(!have_image_files); // We will not do this with images.
std::string error_msg;
// Figure out what the current delta is so we can match it to the desired delta.
std::unique_ptr<ElfFile> elf(ElfFile::Open(input_oat.get(), PROT_READ, MAP_PRIVATE,
@@ -1385,48 +1177,189 @@ static int patchoat(int argc, char **argv) {
if (debug) {
LOG(INFO) << "moving offset by " << base_delta
- << " (0x" << std::hex << base_delta << ") bytes or "
- << std::dec << (base_delta/kPageSize) << " pages.";
+ << " (0x" << std::hex << base_delta << ") bytes or "
+ << std::dec << (base_delta/kPageSize) << " pages.";
}
- // TODO: is it going to be promatic to unlink a file that was flock-ed?
ScopedFlock output_oat_lock;
if (lock_output) {
std::string error_msg;
- if (have_oat_files && !output_oat_lock.Init(output_oat.get(), &error_msg)) {
- LOG(ERROR) << "Unable to lock output oat " << output_image->GetPath() << ": " << error_msg;
+ if (!output_oat_lock.Init(output_oat.get(), &error_msg)) {
+ LOG(ERROR) << "Unable to lock output oat " << output_oat->GetPath() << ": " << error_msg;
cleanup(false);
return EXIT_FAILURE;
}
}
- bool ret;
- if (have_image_files && have_oat_files) {
- TimingLogger::ScopedTiming pt("patch image and oat", &timings);
- ret = PatchOat::Patch(input_oat.get(), input_image_location, base_delta,
- output_oat.get(), output_image.get(), isa, &timings,
- output_oat_fd >= 0, // was it opened from FD?
- new_oat_out);
- // The order here doesn't matter. If the first one is successfully saved and the second one
- // erased, ImageSpace will still detect a problem and not use the files.
- ret = FinishFile(output_image.get(), ret);
- ret = FinishFile(output_oat.get(), ret);
- } else if (have_oat_files) {
- TimingLogger::ScopedTiming pt("patch oat", &timings);
- ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings,
- output_oat_fd >= 0, // was it opened from FD?
- new_oat_out);
- ret = FinishFile(output_oat.get(), ret);
- } else {
- CHECK(false);
- ret = true;
- }
+ TimingLogger::ScopedTiming pt("patch oat", &timings);
+ bool ret = PatchOat::Patch(input_oat.get(), base_delta, output_oat.get(), &timings,
+ output_oat_fd >= 0, // was it opened from FD?
+ new_oat_out);
+ ret = FinishFile(output_oat.get(), ret);
if (kIsDebugBuild) {
LOG(INFO) << "Exiting with return ... " << ret;
}
cleanup(ret);
- return (ret) ? EXIT_SUCCESS : EXIT_FAILURE;
+ return ret ? EXIT_SUCCESS : EXIT_FAILURE;
+}
+
+static int patchoat(int argc, char **argv) {
+ InitLogging(argv);
+ MemMap::Init();
+ const bool debug = kIsDebugBuild;
+ orig_argc = argc;
+ orig_argv = argv;
+ TimingLogger timings("patcher", false, false);
+
+ InitLogging(argv);
+
+ // Skip over the command name.
+ argv++;
+ argc--;
+
+ if (argc == 0) {
+ Usage("No arguments specified");
+ }
+
+ timings.StartTiming("Patchoat");
+
+ // cmd line args
+ bool isa_set = false;
+ InstructionSet isa = kNone;
+ std::string input_oat_filename;
+ std::string input_oat_location;
+ int input_oat_fd = -1;
+ bool have_input_oat = false;
+ std::string input_image_location;
+ std::string output_oat_filename;
+ int output_oat_fd = -1;
+ bool have_output_oat = false;
+ std::string output_image_filename;
+ off_t base_delta = 0;
+ bool base_delta_set = false;
+ std::string patched_image_filename;
+ std::string patched_image_location;
+ bool dump_timings = kIsDebugBuild;
+ bool lock_output = true;
+
+ for (int i = 0; i < argc; ++i) {
+ const StringPiece option(argv[i]);
+ const bool log_options = false;
+ if (log_options) {
+ LOG(INFO) << "patchoat: option[" << i << "]=" << argv[i];
+ }
+ if (option.starts_with("--instruction-set=")) {
+ isa_set = true;
+ const char* isa_str = option.substr(strlen("--instruction-set=")).data();
+ isa = GetInstructionSetFromString(isa_str);
+ if (isa == kNone) {
+ Usage("Unknown or invalid instruction set %s", isa_str);
+ }
+ } else if (option.starts_with("--input-oat-location=")) {
+ if (have_input_oat) {
+ Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used.");
+ }
+ have_input_oat = true;
+ input_oat_location = option.substr(strlen("--input-oat-location=")).data();
+ } else if (option.starts_with("--input-oat-file=")) {
+ if (have_input_oat) {
+ Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used.");
+ }
+ have_input_oat = true;
+ input_oat_filename = option.substr(strlen("--input-oat-file=")).data();
+ } else if (option.starts_with("--input-oat-fd=")) {
+ if (have_input_oat) {
+ Usage("Only one of --input-oat-file, --input-oat-location and --input-oat-fd may be used.");
+ }
+ have_input_oat = true;
+ const char* oat_fd_str = option.substr(strlen("--input-oat-fd=")).data();
+ if (!ParseInt(oat_fd_str, &input_oat_fd)) {
+ Usage("Failed to parse --input-oat-fd argument '%s' as an integer", oat_fd_str);
+ }
+ if (input_oat_fd < 0) {
+ Usage("--input-oat-fd pass a negative value %d", input_oat_fd);
+ }
+ } else if (option.starts_with("--input-image-location=")) {
+ input_image_location = option.substr(strlen("--input-image-location=")).data();
+ } else if (option.starts_with("--output-oat-file=")) {
+ if (have_output_oat) {
+ Usage("Only one of --output-oat-file, and --output-oat-fd may be used.");
+ }
+ have_output_oat = true;
+ output_oat_filename = option.substr(strlen("--output-oat-file=")).data();
+ } else if (option.starts_with("--output-oat-fd=")) {
+ if (have_output_oat) {
+ Usage("Only one of --output-oat-file, --output-oat-fd may be used.");
+ }
+ have_output_oat = true;
+ const char* oat_fd_str = option.substr(strlen("--output-oat-fd=")).data();
+ if (!ParseInt(oat_fd_str, &output_oat_fd)) {
+ Usage("Failed to parse --output-oat-fd argument '%s' as an integer", oat_fd_str);
+ }
+ if (output_oat_fd < 0) {
+ Usage("--output-oat-fd pass a negative value %d", output_oat_fd);
+ }
+ } else if (option.starts_with("--output-image-file=")) {
+ output_image_filename = option.substr(strlen("--output-image-file=")).data();
+ } else if (option.starts_with("--base-offset-delta=")) {
+ const char* base_delta_str = option.substr(strlen("--base-offset-delta=")).data();
+ base_delta_set = true;
+ if (!ParseInt(base_delta_str, &base_delta)) {
+ Usage("Failed to parse --base-offset-delta argument '%s' as an off_t", base_delta_str);
+ }
+ } else if (option.starts_with("--patched-image-location=")) {
+ patched_image_location = option.substr(strlen("--patched-image-location=")).data();
+ } else if (option == "--lock-output") {
+ lock_output = true;
+ } else if (option == "--no-lock-output") {
+ lock_output = false;
+ } else if (option == "--dump-timings") {
+ dump_timings = true;
+ } else if (option == "--no-dump-timings") {
+ dump_timings = false;
+ } else {
+ Usage("Unknown argument %s", option.data());
+ }
+ }
+
+ // The instruction set is mandatory. This simplifies things...
+ if (!isa_set) {
+ Usage("Instruction set must be set.");
+ }
+
+ int ret;
+ if (!input_image_location.empty()) {
+ ret = patchoat_image(timings,
+ isa,
+ input_image_location,
+ output_image_filename,
+ base_delta,
+ base_delta_set,
+ debug);
+ } else {
+ ret = patchoat_oat(timings,
+ isa,
+ patched_image_location,
+ base_delta,
+ base_delta_set,
+ input_oat_fd,
+ input_oat_location,
+ input_oat_filename,
+ have_input_oat,
+ output_oat_fd,
+ output_oat_filename,
+ have_output_oat,
+ lock_output,
+ debug);
+ }
+
+ timings.EndTiming();
+ if (dump_timings) {
+ LOG(INFO) << Dumpable<TimingLogger>(timings);
+ }
+
+ return ret;
}
} // namespace art
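After the split, image patching no longer goes through file descriptors: patchoat_image() takes the --output-image-file path, derives the output directory from it, and hands that, together with the image location and the delta, to the new PatchOat::Patch overload. For an illustrative run such as patchoat --instruction-set=arm --input-image-location=/system/framework/boot.art --output-image-file=/data/dalvik-cache/arm/system@framework@boot.art --base-offset-delta=4096 (paths and delta are made-up values; the flags are the ones parsed above, and the delta must stay page aligned), the directory computation reduces to:

  // Same expression as in patchoat_image(); the resulting value is for the made-up path above.
  std::string output_directory =
      output_image_filename.substr(0, output_image_filename.find_last_of("/"));
  // output_directory == "/data/dalvik-cache/arm"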
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index ceddc343be..a6a8feeb3c 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -53,11 +53,11 @@ class PatchOat {
TimingLogger* timings);
// Patch both the image and the oat file
- static bool Patch(File* oat_in, const std::string& art_location,
- off_t delta, File* oat_out, File* art_out, InstructionSet isa,
- TimingLogger* timings,
- bool output_oat_opened_from_fd, // Was this using --oatput-oat-fd ?
- bool new_oat_out); // Output oat was a new file created by us?
+ static bool Patch(const std::string& art_location,
+ off_t delta,
+ const std::string& output_directory,
+ InstructionSet isa,
+ TimingLogger* timings);
~PatchOat() {}
PatchOat(PatchOat&&) = default;
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 288f95e89b..e9f7add1af 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -275,6 +275,8 @@ LIBART_TARGET_SRC_FILES_arm64 := \
arch/arm64/fault_handler_arm64.cc
LIBART_SRC_FILES_x86 := \
+ interpreter/mterp/mterp.cc \
+ interpreter/mterp/out/mterp_x86.S \
arch/x86/context_x86.cc \
arch/x86/entrypoints_init_x86.cc \
arch/x86/jni_entrypoints_x86.S \
@@ -286,20 +288,6 @@ LIBART_SRC_FILES_x86 := \
LIBART_TARGET_SRC_FILES_x86 := \
$(LIBART_SRC_FILES_x86)
-# Darwin uses non-standard x86 assembly syntax. Don't build x86 Darwin host mterp there.
-ifeq ($(HOST_OS),darwin)
- LIBART_SRC_FILES_x86 += \
- interpreter/mterp/mterp_stub.cc
-else
- LIBART_SRC_FILES_x86 += \
- interpreter/mterp/mterp.cc \
- interpreter/mterp/out/mterp_x86.S
-endif
-# But do support x86 mterp for target build regardless of host platform.
-LIBART_TARGET_SRC_FILES_x86 += \
- interpreter/mterp/mterp.cc \
- interpreter/mterp/out/mterp_x86.S
-
# Note that the fault_handler_x86.cc is not a mistake. This file is
# shared between the x86 and x86_64 architectures.
LIBART_SRC_FILES_x86_64 := \
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 71411817af..e358ff879c 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -130,6 +130,25 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pL2f = art_quick_l2f;
}
+ // More math.
+ qpoints->pCos = cos;
+ qpoints->pSin = sin;
+ qpoints->pAcos = acos;
+ qpoints->pAsin = asin;
+ qpoints->pAtan = atan;
+ qpoints->pAtan2 = atan2;
+ qpoints->pCbrt = cbrt;
+ qpoints->pCosh = cosh;
+ qpoints->pExp = exp;
+ qpoints->pExpm1 = expm1;
+ qpoints->pHypot = hypot;
+ qpoints->pLog = log;
+ qpoints->pLog10 = log10;
+ qpoints->pNextAfter = nextafter;
+ qpoints->pSinh = sinh;
+ qpoints->pTan = tan;
+ qpoints->pTanh = tanh;
+
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
qpoints->pStringCompareTo = art_quick_string_compareto;
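
These assignments just point the quick entrypoint slots at libm; a minimal sketch of the same pattern in isolation (only the libm functions here are real, the local pointers and values are illustrative):

    #include <cmath>

    // Sketch only: the new entrypoint slots are plain function pointers aimed at
    // libm, mirroring the qpoints->pAtan2 = atan2; assignments above.
    int main() {
      double (*pAtan2)(double, double) = std::atan2;
      double (*pCbrt)(double) = std::cbrt;
      double r = pAtan2(1.0, 1.0) + pCbrt(27.0);  // both calls land directly in libm
      return r > 0.0 ? 0 : 1;
    }
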
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 949ad9926e..c4e314b6c8 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -444,11 +444,12 @@ ENTRY art_quick_osr_stub
mov r10, r1 @ Save size of stack
ldr r9, [r11, #40] @ Move managed thread pointer into r9
mov r8, r2 @ Save the pc to call
- sub r7, sp, #12 @ Reserve space for stack pointer, JValue result, and ArtMethod* slot
+ sub r7, sp, #12 @ Reserve space for stack pointer,
+ @ JValue* result, and ArtMethod* slot.
and r7, #0xFFFFFFF0 @ Align stack pointer
mov sp, r7 @ Update stack pointer
str r11, [sp, #4] @ Save old stack pointer
- str r3, [sp, #8] @ Save JValue result
+ str r3, [sp, #8] @ Save JValue* result
mov ip, #0
str ip, [sp] @ Store null for ArtMethod* at bottom of frame
sub sp, sp, r1 @ Reserve space for callee stack
@@ -457,9 +458,8 @@ ENTRY art_quick_osr_stub
mov r0, sp
bl memcpy @ memcpy (dest r0, src r1, bytes r2)
bl .Losr_entry @ Call the method
- ldr r11, [sp, #4] @ Restore saved stack pointer
- ldr r10, [sp, #8] @ Restore JValue result
- mov sp, r11 @ Restore stack pointer.
+ ldr r10, [sp, #8] @ Restore JValue* result
+ ldr sp, [sp, #4] @ Restore saved stack pointer
ldr r4, [sp, #36] @ load shorty
ldrb r4, [r4, #0] @ load return type
cmp r4, #68 @ Test if result type char == 'D'.
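
The stub's 12-byte scratch area can be pictured as three 32-bit slots; a sketch of the layout implied by the offsets above (the struct is purely illustrative, not an ART type):

    #include <cstdint>

    // Illustrative layout of the 12 bytes reserved below the aligned stack pointer
    // in art_quick_osr_stub, matching the offsets used above.
    struct OsrStubFrameSketch {
      uint32_t art_method_slot;  // [sp, #0] null ArtMethod* stored at the bottom of the frame
      uint32_t saved_sp;         // [sp, #4] old stack pointer, now reloaded directly into sp
      uint32_t jvalue_result;    // [sp, #8] JValue* result pointer (the comment fix above)
    };
    static_assert(sizeof(OsrStubFrameSketch) == 12, "three 32-bit slots on ARM");
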
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index d6e0f1c1a3..69caec88f0 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1755,6 +1755,8 @@ END_FUNCTION art_quick_read_barrier_for_root_slow
* rcx = JValue* result
* r8 = shorty
* r9 = thread
+ *
+ * Note that the native C ABI already guarantees 16-byte stack alignment.
*/
DEFINE_FUNCTION art_quick_osr_stub
// Save the non-volatiles.
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 28540c8437..ebe89bbbd2 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -449,23 +449,26 @@ template<typename RootVisitorType>
void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
ArtMethod* interface_method = nullptr;
mirror::Class* klass = declaring_class_.Read();
- if (UNLIKELY(klass != nullptr && klass->IsProxyClass())) {
- // For normal methods, dex cache shortcuts will be visited through the declaring class.
- // However, for proxies we need to keep the interface method alive, so we visit its roots.
- interface_method = mirror::DexCache::GetElementPtrSize(
- GetDexCacheResolvedMethods(pointer_size),
- GetDexMethodIndex(),
- pointer_size);
- DCHECK(interface_method != nullptr);
- DCHECK_EQ(interface_method,
- Runtime::Current()->GetClassLinker()->FindMethodForProxy(klass, this));
- interface_method->VisitRoots(visitor, pointer_size);
- }
-
- visitor.VisitRootIfNonNull(declaring_class_.AddressWithoutBarrier());
- ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
- if (hotness_count_ != 0 && !IsNative() && profiling_info != nullptr) {
- profiling_info->VisitRoots(visitor);
+ if (LIKELY(klass != nullptr)) {
+ if (UNLIKELY(klass->IsProxyClass())) {
+ // For normal methods, dex cache shortcuts will be visited through the declaring class.
+ // However, for proxies we need to keep the interface method alive, so we visit its roots.
+ interface_method = mirror::DexCache::GetElementPtrSize(
+ GetDexCacheResolvedMethods(pointer_size),
+ GetDexMethodIndex(),
+ pointer_size);
+ DCHECK(interface_method != nullptr);
+ DCHECK_EQ(interface_method,
+ Runtime::Current()->GetClassLinker()->FindMethodForProxy(klass, this));
+ interface_method->VisitRoots(visitor, pointer_size);
+ }
+ visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
+ if (!IsNative()) {
+ ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
+ if (profiling_info != nullptr) {
+ profiling_info->VisitRoots(visitor);
+ }
+ }
}
}
diff --git a/runtime/base/scoped_arena_allocator.cc b/runtime/base/scoped_arena_allocator.cc
index 90c6ee34ec..7d04fa0223 100644
--- a/runtime/base/scoped_arena_allocator.cc
+++ b/runtime/base/scoped_arena_allocator.cc
@@ -99,7 +99,7 @@ void* ArenaStack::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
ptr = AllocateFromNextArena(rounded_bytes);
CHECK(ptr != nullptr) << "Failed to allocate memory";
- MEMORY_TOOL_MAKE_NOACCESS(ptr, top_end_);
+ MEMORY_TOOL_MAKE_NOACCESS(ptr, top_end_ - ptr);
}
CurrentStats()->RecordAlloc(bytes, kind);
top_ptr_ = ptr + rounded_bytes;
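
The one-line fix matters because the macro's second argument is a byte count, not an end pointer; a minimal sketch of the corrected call shape, with a hypothetical stand-in for the memory-tool hook:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for MEMORY_TOOL_MAKE_NOACCESS: like the real macro,
    // it takes (address, size in bytes), which is why the second argument changed.
    inline void PoisonNoAccess(void* /*addr*/, size_t /*num_bytes*/) {}

    inline void MarkRestOfArenaNoAccess(uint8_t* ptr, uint8_t* top_end) {
      // Pass the remaining byte count, not the end pointer itself.
      PoisonNoAccess(ptr, static_cast<size_t>(top_end - ptr));
    }
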
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 0631ebe374..5278d1bb05 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2620,19 +2620,37 @@ const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file,
return oat_class.GetOatMethod(oat_method_idx).GetQuickCode();
}
-// Returns true if the method must run with interpreter, false otherwise.
-static bool NeedsInterpreter(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code) {
+ if (UNLIKELY(method->IsNative() || method->IsProxyMethod())) {
+ return false;
+ }
+
if (quick_code == nullptr) {
- // No code: need interpreter.
- // May return true for native code, in the case of generic JNI
- // DCHECK(!method->IsNative());
return true;
}
- // If interpreter mode is enabled, every method (except native and proxy) must
- // be run with interpreter.
- return Runtime::Current()->GetInstrumentation()->InterpretOnly() &&
- !method->IsNative() && !method->IsProxyMethod();
+
+ Runtime* runtime = Runtime::Current();
+ instrumentation::Instrumentation* instr = runtime->GetInstrumentation();
+ if (instr->InterpretOnly()) {
+ return true;
+ }
+
+ if (runtime->GetClassLinker()->IsQuickToInterpreterBridge(quick_code)) {
+ // This check avoids unnecessary compiled/interpreter transitions.
+ return true;
+ }
+
+ if (Dbg::IsForcedInterpreterNeededForCalling(Thread::Current(), method)) {
+ // Force the use of interpreter when it is required by the debugger.
+ return true;
+ }
+
+ if (runtime->UseJit() && runtime->GetJit()->JitAtFirstUse()) {
+ // When JIT-at-first-use is forced, the interpreter entry point is used to hand execution to the JIT.
+ return true;
+ }
+
+ return false;
}
void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
@@ -2677,15 +2695,12 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
OatFile::OatMethod oat_method = oat_class.GetOatMethod(method_index);
quick_code = oat_method.GetQuickCode();
}
- const bool enter_interpreter = NeedsInterpreter(method, quick_code);
- if (enter_interpreter) {
+ // Check whether the method is native, in which case it's generic JNI.
+ if (quick_code == nullptr && method->IsNative()) {
+ quick_code = GetQuickGenericJniStub();
+ } else if (ShouldUseInterpreterEntrypoint(method, quick_code)) {
// Use interpreter entry point.
- // Check whether the method is native, in which case it's generic JNI.
- if (quick_code == nullptr && method->IsNative()) {
- quick_code = GetQuickGenericJniStub();
- } else {
- quick_code = GetQuickToInterpreterBridge();
- }
+ quick_code = GetQuickToInterpreterBridge();
}
runtime->GetInstrumentation()->UpdateMethodsCode(method, quick_code);
}
@@ -2716,7 +2731,8 @@ void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class
}
// Install entry point from interpreter.
- bool enter_interpreter = NeedsInterpreter(method, method->GetEntryPointFromQuickCompiledCode());
+ const void* quick_code = method->GetEntryPointFromQuickCompiledCode();
+ bool enter_interpreter = ShouldUseInterpreterEntrypoint(method, quick_code);
if (!method->IsInvokable()) {
EnsureThrowsInvocationError(method);
@@ -2728,20 +2744,18 @@ void ClassLinker::LinkCode(ArtMethod* method, const OatFile::OatClass* oat_class
// It will be replaced by the proper entry point by ClassLinker::FixupStaticTrampolines
// after initializing class (see ClassLinker::InitializeClass method).
method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
+ } else if (quick_code == nullptr && method->IsNative()) {
+ method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
} else if (enter_interpreter) {
- if (!method->IsNative()) {
- // Set entry point from compiled code if there's no code or in interpreter only mode.
- method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
- } else {
- method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
- }
+ // Set entry point from compiled code if there's no code or in interpreter only mode.
+ method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
}
if (method->IsNative()) {
// Unregistering restores the dlsym lookup stub.
method->UnregisterNative();
- if (enter_interpreter) {
+ if (enter_interpreter || quick_code == nullptr) {
// We have a native method here without code. Then it should have either the generic JNI
// trampoline as entrypoint (non-static), or the resolution trampoline (static).
// TODO: this doesn't handle all the cases where trampolines may be installed.
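
Both call sites above now reduce to the same three-way choice; a minimal sketch of that pattern, assuming the in-tree ClassLinker API and the stub helpers referenced in this hunk (the helper name is hypothetical):

    // Hypothetical helper, assuming class_linker.h and the entrypoint stub helpers
    // (GetQuickGenericJniStub / GetQuickToInterpreterBridge) used above.
    static const void* ChooseEntrypointSketch(ArtMethod* method, const void* quick_code)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      if (quick_code == nullptr && method->IsNative()) {
        return GetQuickGenericJniStub();       // native method without compiled code
      }
      if (ClassLinker::ShouldUseInterpreterEntrypoint(method, quick_code)) {
        return GetQuickToInterpreterBridge();  // no code, interpret-only, debugger, or JIT-at-first-use
      }
      return quick_code;                       // keep the compiled code
    }
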
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 56a868a4d0..a9448f732c 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -592,6 +592,9 @@ class ClassLinker {
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
+ static bool ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
struct DexCacheData {
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
// not work properly.
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 7a852e216e..ddf27496d9 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -478,7 +478,7 @@ bool DexFileVerifier::CheckClassDataItemField(uint32_t idx,
// Check field access flags.
std::string error_msg;
- if (!CheckFieldAccessFlags(access_flags, class_access_flags, &error_msg)) {
+ if (!CheckFieldAccessFlags(idx, access_flags, class_access_flags, &error_msg)) {
ErrorStringPrintf("%s", error_msg.c_str());
return false;
}
@@ -2312,12 +2312,88 @@ static bool CheckAtMostOneOfPublicProtectedPrivate(uint32_t flags) {
return count <= 1;
}
-bool DexFileVerifier::CheckFieldAccessFlags(uint32_t field_access_flags,
+// Helper functions to retrieve names from the dex file. We do not want to rely on DexFile
+// functionality, as we're still verifying the dex file. begin and header correspond to the
+// underscored variants in the DexFileVerifier.
+
+static std::string GetStringOrError(const uint8_t* const begin,
+ const DexFile::Header* const header,
+ uint32_t string_idx) {
+ if (header->string_ids_size_ < string_idx) {
+ return "(error)";
+ }
+
+ const DexFile::StringId* string_id =
+ reinterpret_cast<const DexFile::StringId*>(begin + header->string_ids_off_) + string_idx;
+
+ // Assume that the data is OK at this point. String data has already been checked.
+
+ const uint8_t* ptr = begin + string_id->string_data_off_;
+ DecodeUnsignedLeb128(&ptr);
+ return reinterpret_cast<const char*>(ptr);
+}
+
+static std::string GetClassOrError(const uint8_t* const begin,
+ const DexFile::Header* const header,
+ uint32_t class_idx) {
+ if (header->type_ids_size_ < class_idx) {
+ return "(error)";
+ }
+
+ const DexFile::TypeId* type_id =
+ reinterpret_cast<const DexFile::TypeId*>(begin + header->type_ids_off_) + class_idx;
+
+ // Assume that the data is OK at this point. Type id offsets have already been checked.
+
+ return GetStringOrError(begin, header, type_id->descriptor_idx_);
+}
+
+static std::string GetFieldDescriptionOrError(const uint8_t* const begin,
+ const DexFile::Header* const header,
+ uint32_t idx) {
+ if (header->field_ids_size_ < idx) {
+ return "(error)";
+ }
+
+ const DexFile::FieldId* field_id =
+ reinterpret_cast<const DexFile::FieldId*>(begin + header->field_ids_off_) + idx;
+
+ // Assume that the data is OK at this point. Field id offsets have already been checked.
+
+ std::string class_name = GetClassOrError(begin, header, field_id->class_idx_);
+ std::string field_name = GetStringOrError(begin, header, field_id->name_idx_);
+
+ return class_name + "." + field_name;
+}
+
+static std::string GetMethodDescriptionOrError(const uint8_t* const begin,
+ const DexFile::Header* const header,
+ uint32_t idx) {
+ if (header->method_ids_size_ < idx) {
+ return "(error)";
+ }
+
+ const DexFile::MethodId* method_id =
+ reinterpret_cast<const DexFile::MethodId*>(begin + header->method_ids_off_) + idx;
+
+ // Assume that the data is OK at this point. Method id offsets have already been checked.
+
+ std::string class_name = GetClassOrError(begin, header, method_id->class_idx_);
+ std::string method_name = GetStringOrError(begin, header, method_id->name_idx_);
+
+ return class_name + "." + method_name;
+}
+
+bool DexFileVerifier::CheckFieldAccessFlags(uint32_t idx,
+ uint32_t field_access_flags,
uint32_t class_access_flags,
std::string* error_msg) {
// Generally sort out >16-bit flags.
if ((field_access_flags & ~kAccJavaFlagsMask) != 0) {
- *error_msg = StringPrintf("Bad class_data_item field access_flags %x", field_access_flags);
+ *error_msg = StringPrintf("Bad field access_flags for %s: %x(%s)",
+ GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+ field_access_flags,
+ PrettyJavaAccessFlags(field_access_flags).c_str());
return false;
}
@@ -2334,8 +2410,10 @@ bool DexFileVerifier::CheckFieldAccessFlags(uint32_t field_access_flags,
// Fields may have only one of public/protected/final.
if (!CheckAtMostOneOfPublicProtectedPrivate(field_access_flags)) {
- *error_msg = StringPrintf("Field may have only one of public/protected/private, %x",
- field_access_flags);
+ *error_msg = StringPrintf("Field may have only one of public/protected/private, %s: %x(%s)",
+ GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+ field_access_flags,
+ PrettyJavaAccessFlags(field_access_flags).c_str());
return false;
}
@@ -2344,14 +2422,19 @@ bool DexFileVerifier::CheckFieldAccessFlags(uint32_t field_access_flags,
// Interface fields must be public final static.
constexpr uint32_t kPublicFinalStatic = kAccPublic | kAccFinal | kAccStatic;
if ((field_access_flags & kPublicFinalStatic) != kPublicFinalStatic) {
- *error_msg = StringPrintf("Interface field is not public final static: %x",
- field_access_flags);
+ *error_msg = StringPrintf("Interface field is not public final static, %s: %x(%s)",
+ GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+ field_access_flags,
+ PrettyJavaAccessFlags(field_access_flags).c_str());
return false;
}
// Interface fields may be synthetic, but may not have other flags.
constexpr uint32_t kDisallowed = ~(kPublicFinalStatic | kAccSynthetic);
if ((field_access_flags & kFieldAccessFlags & kDisallowed) != 0) {
- *error_msg = StringPrintf("Interface field has disallowed flag: %x", field_access_flags);
+ *error_msg = StringPrintf("Interface field has disallowed flag, %s: %x(%s)",
+ GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+ field_access_flags,
+ PrettyJavaAccessFlags(field_access_flags).c_str());
return false;
}
return true;
@@ -2360,7 +2443,8 @@ bool DexFileVerifier::CheckFieldAccessFlags(uint32_t field_access_flags,
// Volatile fields may not be final.
constexpr uint32_t kVolatileFinal = kAccVolatile | kAccFinal;
if ((field_access_flags & kVolatileFinal) == kVolatileFinal) {
- *error_msg = "Fields may not be volatile and final";
+ *error_msg = StringPrintf("Fields may not be volatile and final: %s",
+ GetFieldDescriptionOrError(begin_, header_, idx).c_str());
return false;
}
@@ -2410,7 +2494,9 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
constexpr uint32_t kAllMethodFlags =
kAccJavaFlagsMask | kAccConstructor | kAccDeclaredSynchronized;
if ((method_access_flags & ~kAllMethodFlags) != 0) {
- *error_msg = StringPrintf("Bad class_data_item method access_flags %x", method_access_flags);
+ *error_msg = StringPrintf("Bad method access_flags for %s: %x",
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
+ method_access_flags);
return false;
}
@@ -2430,7 +2516,8 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
// Methods may have only one of public/protected/final.
if (!CheckAtMostOneOfPublicProtectedPrivate(method_access_flags)) {
- *error_msg = StringPrintf("Method may have only one of public/protected/private, %x",
+ *error_msg = StringPrintf("Method may have only one of public/protected/private, %s: %x",
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
method_access_flags);
return false;
}
@@ -2456,8 +2543,10 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
// Only methods named "<clinit>" or "<init>" may be marked constructor. Note: we cannot enforce
// the reverse for backwards compatibility reasons.
if (((method_access_flags & kAccConstructor) != 0) && !is_constructor) {
- *error_msg = StringPrintf("Method %" PRIu32 " is marked constructor, but doesn't match name",
- method_index);
+ *error_msg =
+ StringPrintf("Method %" PRIu32 "(%s) is marked constructor, but doesn't match name",
+ method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
return false;
}
// Check that the static constructor (= static initializer) is named "<clinit>" and that the
@@ -2465,8 +2554,9 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
if (is_constructor) {
bool is_static = (method_access_flags & kAccStatic) != 0;
if (is_static ^ is_clinit_by_name) {
- *error_msg = StringPrintf("Constructor %" PRIu32 " is not flagged correctly wrt/ static.",
- method_index);
+ *error_msg = StringPrintf("Constructor %" PRIu32 "(%s) is not flagged correctly wrt/ static.",
+ method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
return false;
}
}
@@ -2474,8 +2564,9 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
// and other methods in the virtual methods list.
bool is_direct = (method_access_flags & (kAccStatic | kAccPrivate)) != 0 || is_constructor;
if (is_direct != expect_direct) {
- *error_msg = StringPrintf("Direct/virtual method %" PRIu32 " not in expected list %d",
+ *error_msg = StringPrintf("Direct/virtual method %" PRIu32 "(%s) not in expected list %d",
method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
expect_direct);
return false;
}
@@ -2488,14 +2579,17 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
if (!has_code) {
// Only native or abstract methods may not have code.
if ((method_access_flags & (kAccNative | kAccAbstract)) == 0) {
- *error_msg = StringPrintf("Method %" PRIu32 " has no code, but is not marked native or "
+ *error_msg = StringPrintf("Method %" PRIu32 "(%s) has no code, but is not marked native or "
"abstract",
- method_index);
+ method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
return false;
}
// Constructors must always have code.
if (is_constructor) {
- *error_msg = StringPrintf("Constructor %u must not be abstract or native", method_index);
+ *error_msg = StringPrintf("Constructor %u(%s) must not be abstract or native",
+ method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
return false;
}
if ((method_access_flags & kAccAbstract) != 0) {
@@ -2503,14 +2597,15 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
constexpr uint32_t kForbidden =
kAccPrivate | kAccStatic | kAccFinal | kAccNative | kAccStrict | kAccSynchronized;
if ((method_access_flags & kForbidden) != 0) {
- *error_msg = StringPrintf("Abstract method %" PRIu32 " has disallowed access flags %x",
- method_index,
- method_access_flags);
+ *error_msg = StringPrintf("Abstract method %" PRIu32 "(%s) has disallowed access flags %x",
+ method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
+ method_access_flags);
return false;
}
// Abstract methods should be in an abstract class or interface.
if ((class_access_flags & (kAccInterface | kAccAbstract)) == 0) {
- LOG(WARNING) << "Method " << PrettyMethod(method_index, *dex_file_)
+ LOG(WARNING) << "Method " << GetMethodDescriptionOrError(begin_, header_, method_index)
<< " is abstract, but the declaring class is neither abstract nor an "
<< "interface in dex file "
<< dex_file_->GetLocation();
@@ -2520,8 +2615,9 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
if ((class_access_flags & kAccInterface) != 0) {
// Interface methods must be public and abstract.
if ((method_access_flags & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
- *error_msg = StringPrintf("Interface method %" PRIu32 " is not public and abstract",
- method_index);
+ *error_msg = StringPrintf("Interface method %" PRIu32 "(%s) is not public and abstract",
+ method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
return false;
}
// At this point, we know the method is public and abstract. This means that all the checks
@@ -2533,8 +2629,9 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
// When there's code, the method must not be native or abstract.
if ((method_access_flags & (kAccNative | kAccAbstract)) != 0) {
- *error_msg = StringPrintf("Method %" PRIu32 " has code, but is marked native or abstract",
- method_index);
+ *error_msg = StringPrintf("Method %" PRIu32 "(%s) has code, but is marked native or abstract",
+ method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
return false;
}
@@ -2543,8 +2640,9 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
static constexpr uint32_t kInitAllowed =
kAccPrivate | kAccProtected | kAccPublic | kAccStrict | kAccVarargs | kAccSynthetic;
if ((method_access_flags & ~kInitAllowed) != 0) {
- *error_msg = StringPrintf("Constructor %" PRIu32 " flagged inappropriately %x",
+ *error_msg = StringPrintf("Constructor %" PRIu32 "(%s) flagged inappropriately %x",
method_index,
+ GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
method_access_flags);
return false;
}
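
The net effect on the error strings is that each one now names the offending member; a minimal sketch of the "class.member" composition done by the helpers above (the descriptor and name are invented, the real values come from the string/type/field id tables):

    #include <string>

    // Sketch only: mirrors GetFieldDescriptionOrError / GetMethodDescriptionOrError.
    std::string DescribeMemberSketch() {
      std::string class_descriptor = "LMethodFlags;";
      std::string member_name = "foo";
      return class_descriptor + "." + member_name;  // -> "LMethodFlags;.foo"
    }
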
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 6c63749f04..ddfeea2305 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -157,9 +157,10 @@ class DexFileVerifier {
// Check validity of the given access flags, interpreted for a field in the context of a class
// with the given second access flags.
- static bool CheckFieldAccessFlags(uint32_t field_access_flags,
- uint32_t class_access_flags,
- std::string* error_msg);
+ bool CheckFieldAccessFlags(uint32_t idx,
+ uint32_t field_access_flags,
+ uint32_t class_access_flags,
+ std::string* error_msg);
// Check validity of the given method and access flags, in the context of a class with the given
// second access flags.
bool CheckMethodAccessFlags(uint32_t method_index,
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index b67af53458..558a6ed5bf 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -527,7 +527,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsConstructors) {
ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
OrMaskToMethodFlags(dex_file, "<init>", kAccStatic);
},
- "Constructor 1 is not flagged correctly wrt/ static");
+ "Constructor 1(LMethodFlags;.<init>) is not flagged correctly wrt/ static");
static constexpr uint32_t kInitNotAllowed[] = {
kAccFinal,
kAccSynchronized,
@@ -544,7 +544,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsConstructors) {
ApplyMaskToMethodFlags(dex_file, "<init>", ~kAccPublic);
OrMaskToMethodFlags(dex_file, "<init>", kInitNotAllowed[i]);
},
- "Constructor 1 flagged inappropriately");
+ "Constructor 1(LMethodFlags;.<init>) flagged inappropriately");
}
}
@@ -742,7 +742,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
},
- "Interface method 1 is not public and abstract");
+ "Interface method 1(LInterfaceMethodFlags;.foo) is not public and abstract");
VerifyModification(
kMethodFlagsInterface,
"method_flags_interface_non_abstract",
@@ -751,7 +751,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccAbstract);
},
- "Method 1 has no code, but is not marked native or abstract");
+ "Method 1(LInterfaceMethodFlags;.foo) has no code, but is not marked native or abstract");
VerifyModification(
kMethodFlagsInterface,
@@ -761,7 +761,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
OrMaskToMethodFlags(dex_file, "foo", kAccStatic);
},
- "Direct/virtual method 1 not in expected list 0");
+ "Direct/virtual method 1(LInterfaceMethodFlags;.foo) not in expected list 0");
VerifyModification(
kMethodFlagsInterface,
"method_flags_interface_private",
@@ -771,7 +771,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
OrMaskToMethodFlags(dex_file, "foo", kAccPrivate);
},
- "Direct/virtual method 1 not in expected list 0");
+ "Direct/virtual method 1(LInterfaceMethodFlags;.foo) not in expected list 0");
VerifyModification(
kMethodFlagsInterface,
@@ -781,7 +781,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
},
- "Interface method 1 is not public and abstract");
+ "Interface method 1(LInterfaceMethodFlags;.foo) is not public and abstract");
VerifyModification(
kMethodFlagsInterface,
"method_flags_interface_protected",
@@ -791,7 +791,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
ApplyMaskToMethodFlags(dex_file, "foo", ~kAccPublic);
OrMaskToMethodFlags(dex_file, "foo", kAccProtected);
},
- "Interface method 1 is not public and abstract");
+ "Interface method 1(LInterfaceMethodFlags;.foo) is not public and abstract");
constexpr uint32_t kAllMethodFlags =
kAccPublic |
@@ -831,7 +831,7 @@ TEST_F(DexFileVerifierTest, MethodAccessFlagsInterfaces) {
}
OrMaskToMethodFlags(dex_file, "foo", mask);
},
- "Abstract method 1 has disallowed access flags");
+ "Abstract method 1(LInterfaceMethodFlags;.foo) has disallowed access flags");
}
}
diff --git a/runtime/dex_instruction_utils.h b/runtime/dex_instruction_utils.h
index 1ae2b1b108..2849cd8533 100644
--- a/runtime/dex_instruction_utils.h
+++ b/runtime/dex_instruction_utils.h
@@ -49,6 +49,16 @@ std::ostream& operator<<(std::ostream& os, const DexMemAccessType& type);
// NOTE: The following functions disregard quickened instructions.
+// By "direct" const we mean to exclude const-string and const-class
+// which load data from somewhere else, i.e. indirectly.
+constexpr bool IsInstructionDirectConst(Instruction::Code opcode) {
+ return Instruction::CONST_4 <= opcode && opcode <= Instruction::CONST_WIDE_HIGH16;
+}
+
+constexpr bool IsInstructionConstWide(Instruction::Code opcode) {
+ return Instruction::CONST_WIDE_16 <= opcode && opcode <= Instruction::CONST_WIDE_HIGH16;
+}
+
constexpr bool IsInstructionReturn(Instruction::Code opcode) {
return Instruction::RETURN_VOID <= opcode && opcode <= Instruction::RETURN_OBJECT;
}
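
A short usage sketch for the two new predicates (the helper name is hypothetical; it assumes dex_instruction_utils.h from this diff is included):

    // Direct consts exclude const-string/const-class; excluding the wide forms
    // then leaves the 4/16/32-bit constant-producing opcodes.
    bool IsNarrowDirectConstSketch(Instruction::Code opcode) {
      return IsInstructionDirectConst(opcode) && !IsInstructionConstWide(opcode);
    }
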
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 92693395f1..0c06c386b5 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -305,12 +305,6 @@ static bool RelocateImage(const char* image_location, const char* dest_filename,
std::string output_image_filename_arg("--output-image-file=");
output_image_filename_arg += dest_filename;
- std::string input_oat_location_arg("--input-oat-location=");
- input_oat_location_arg += ImageHeader::GetOatLocationFromImageLocation(image_location);
-
- std::string output_oat_filename_arg("--output-oat-file=");
- output_oat_filename_arg += ImageHeader::GetOatLocationFromImageLocation(dest_filename);
-
std::string instruction_set_arg("--instruction-set=");
instruction_set_arg += GetInstructionSetString(isa);
@@ -324,9 +318,6 @@ static bool RelocateImage(const char* image_location, const char* dest_filename,
argv.push_back(input_image_location_arg);
argv.push_back(output_image_filename_arg);
- argv.push_back(input_oat_location_arg);
- argv.push_back(output_oat_filename_arg);
-
argv.push_back(instruction_set_arg);
argv.push_back(base_offset_arg);
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 0b2471b4c0..4fd3c78f44 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -27,6 +27,7 @@
#include "unstarted_runtime.h"
#include "mterp/mterp.h"
#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
namespace art {
namespace interpreter {
@@ -293,9 +294,10 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item,
method, 0);
}
- if (UNLIKELY(Runtime::Current()->GetJit() != nullptr &&
- Runtime::Current()->GetJit()->JitAtFirstUse() &&
- method->HasAnyCompiledCode())) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (UNLIKELY(jit != nullptr &&
+ jit->JitAtFirstUse() &&
+ jit->GetCodeCache()->ContainsMethod(method))) {
JValue result;
// Pop the shadow frame before calling into compiled code.
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 09d860140f..cbaa8173d2 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -20,6 +20,7 @@
#include "debugger.h"
#include "entrypoints/runtime_asm_entrypoints.h"
+#include "jit/jit.h"
#include "mirror/array-inl.h"
#include "stack.h"
#include "unstarted_runtime.h"
@@ -501,23 +502,6 @@ static inline bool DoCallCommon(ArtMethod* called_method,
uint32_t (&arg)[kVarArgMax],
uint32_t vregC) ALWAYS_INLINE;
-SHARED_REQUIRES(Locks::mutator_lock_)
-static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) ALWAYS_INLINE;
-
-static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame) {
- ArtMethod* target = new_shadow_frame->GetMethod();
- if (UNLIKELY(target->IsNative() || target->IsProxyMethod())) {
- return false;
- }
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- return runtime->GetInstrumentation()->IsForcedInterpretOnly() ||
- // Doing this check avoids doing compiled/interpreter transitions.
- class_linker->IsQuickToInterpreterBridge(target->GetEntryPointFromQuickCompiledCode()) ||
- // Force the use of interpreter when it is required by the debugger.
- Dbg::IsForcedInterpreterNeededForCalling(self, target);
-}
-
void ArtInterpreterToCompiledCodeBridge(Thread* self,
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
@@ -736,7 +720,10 @@ static inline bool DoCallCommon(ArtMethod* called_method,
// Do the call now.
if (LIKELY(Runtime::Current()->IsStarted())) {
- if (NeedsInterpreter(self, new_shadow_frame)) {
+ ArtMethod* target = new_shadow_frame->GetMethod();
+ if (ClassLinker::ShouldUseInterpreterEntrypoint(
+ target,
+ target->GetEntryPointFromQuickCompiledCode())) {
ArtInterpreterToInterpreterBridge(self, code_item, new_shadow_frame, result);
} else {
ArtInterpreterToCompiledCodeBridge(self, code_item, new_shadow_frame, result);
diff --git a/runtime/interpreter/mterp/arm/binopWide.S b/runtime/interpreter/mterp/arm/binopWide.S
index 57d43c651a..1d511ecfb0 100644
--- a/runtime/interpreter/mterp/arm/binopWide.S
+++ b/runtime/interpreter/mterp/arm/binopWide.S
@@ -16,10 +16,10 @@
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
@@ -28,8 +28,8 @@
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/binopWide2addr.S b/runtime/interpreter/mterp/arm/binopWide2addr.S
index 4e855f2d16..81db48bade 100644
--- a/runtime/interpreter/mterp/arm/binopWide2addr.S
+++ b/runtime/interpreter/mterp/arm/binopWide2addr.S
@@ -15,17 +15,17 @@
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if $chkzero
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/fbinopWide.S b/runtime/interpreter/mterp/arm/fbinopWide.S
index 1bed817824..ca13bfbab6 100644
--- a/runtime/interpreter/mterp/arm/fbinopWide.S
+++ b/runtime/interpreter/mterp/arm/fbinopWide.S
@@ -14,9 +14,9 @@
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
fldd d1, [r3] @ d1<- vCC
fldd d0, [r2] @ d0<- vBB
-
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ s2<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
fstd d2, [r9] @ vAA<- d2
diff --git a/runtime/interpreter/mterp/arm/fbinopWide2addr.S b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
index 9f56986db8..4e7401dae3 100644
--- a/runtime/interpreter/mterp/arm/fbinopWide2addr.S
+++ b/runtime/interpreter/mterp/arm/fbinopWide2addr.S
@@ -12,10 +12,10 @@
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
and r9, r9, #15 @ r9<- A
fldd d1, [r3] @ d1<- vB
+ CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
-
$instr @ d2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fstd d2, [r9] @ vAA<- d2
diff --git a/runtime/interpreter/mterp/arm/funopWider.S b/runtime/interpreter/mterp/arm/funopWider.S
index 087a1f2faf..450ba3a157 100644
--- a/runtime/interpreter/mterp/arm/funopWider.S
+++ b/runtime/interpreter/mterp/arm/funopWider.S
@@ -12,6 +12,7 @@
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
and r9, r9, #15 @ r9<- A
$instr @ d0<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
fstd d0, [r9] @ vA<- d0
diff --git a/runtime/interpreter/mterp/arm/header.S b/runtime/interpreter/mterp/arm/header.S
index 14319d953f..b2370bffb4 100644
--- a/runtime/interpreter/mterp/arm/header.S
+++ b/runtime/interpreter/mterp/arm/header.S
@@ -263,6 +263,19 @@ unspecified registers or condition codes.
str \reg, [rFP, \vreg, lsl #2]
str \reg, [rREFS, \vreg, lsl #2]
.endm
+.macro SET_VREG_SHADOW reg, vreg
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+
+/*
+ * Clear the corresponding shadow regs for a vreg pair
+ */
+.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
+ mov \tmp1, #0
+ add \tmp2, \vreg, #1
+ SET_VREG_SHADOW \tmp1, \vreg
+ SET_VREG_SHADOW \tmp1, \tmp2
+.endm
/*
* Convert a virtual register index into an address.
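
Read as C, the new macro clears both reference-table slots backing a wide vreg; a sketch of the equivalent operation, modeling rREFS as a plain array (an assumption made only for illustration):

    #include <cstdint>

    // Equivalent of CLEAR_SHADOW_PAIR vreg, tmp1, tmp2 with rREFS modeled as a
    // uint32_t slot array (illustrative; the real code writes to the shadow frame).
    inline void ClearShadowPairSketch(uint32_t* refs, uint32_t vreg) {
      refs[vreg] = 0;      // SET_VREG_SHADOW 0, vreg
      refs[vreg + 1] = 0;  // SET_VREG_SHADOW 0, vreg + 1
    }
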
diff --git a/runtime/interpreter/mterp/arm/op_aget_wide.S b/runtime/interpreter/mterp/arm/op_aget_wide.S
index caaec71d02..e1430b44f2 100644
--- a/runtime/interpreter/mterp/arm/op_aget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_aget_wide.S
@@ -10,6 +10,7 @@
mov r3, r0, lsr #8 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
+ CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
diff --git a/runtime/interpreter/mterp/arm/op_const_wide.S b/runtime/interpreter/mterp/arm/op_const_wide.S
index 2cdc4261e6..12394b6cbe 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide.S
@@ -6,6 +6,7 @@
FETCH r3, 4 @ r3<- HHHH (high)
mov r9, rINST, lsr #8 @ r9<- AA
orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_16.S b/runtime/interpreter/mterp/arm/op_const_wide_16.S
index 56bfc17a91..3811d8641b 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide_16.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide_16.S
@@ -3,6 +3,7 @@
mov r3, rINST, lsr #8 @ r3<- AA
mov r1, r0, asr #31 @ r1<- ssssssss
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_32.S b/runtime/interpreter/mterp/arm/op_const_wide_32.S
index 36d4628502..0b6f1cc384 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide_32.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide_32.S
@@ -4,6 +4,7 @@
FETCH_S r2, 2 @ r2<- ssssBBBB (high)
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
mov r1, r0, asr #31 @ r1<- ssssssss
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/op_const_wide_high16.S b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
index bee592d16b..b9796eb561 100644
--- a/runtime/interpreter/mterp/arm/op_const_wide_high16.S
+++ b/runtime/interpreter/mterp/arm/op_const_wide_high16.S
@@ -4,6 +4,7 @@
mov r0, #0 @ r0<- 00000000
mov r1, r1, lsl #16 @ r1<- BBBB0000
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S
index f8d2f41dea..859ffac038 100644
--- a/runtime/interpreter/mterp/arm/op_iget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_iget_wide.S
@@ -15,8 +15,9 @@
PREFETCH_INST 2
cmp r3, #0
bne MterpException @ bail out
- add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
- stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
ADVANCE 2
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
index 4d6976e085..07f854adf4 100644
--- a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
+++ b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S
@@ -8,6 +8,7 @@
ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ fp[A]<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_result_wide.S b/runtime/interpreter/mterp/arm/op_move_result_wide.S
index c64103c391..1845ccf69f 100644
--- a/runtime/interpreter/mterp/arm/op_move_result_wide.S
+++ b/runtime/interpreter/mterp/arm/op_move_result_wide.S
@@ -1,8 +1,9 @@
/* move-result-wide vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA]
ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
stmia r2, {r0-r1} @ fp[AA]<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/op_move_wide.S b/runtime/interpreter/mterp/arm/op_move_wide.S
index 1345b95fa7..f5d156d732 100644
--- a/runtime/interpreter/mterp/arm/op_move_wide.S
+++ b/runtime/interpreter/mterp/arm/op_move_wide.S
@@ -1,10 +1,11 @@
/* move-wide vA, vB */
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r2, rINST, #8, #4 @ r2<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ add r2, rFP, rINST, lsl #2 @ r2<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r2, {r0-r1} @ fp[A]<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_16.S b/runtime/interpreter/mterp/arm/op_move_wide_16.S
index 133a4c36ba..8a55c4b13b 100644
--- a/runtime/interpreter/mterp/arm/op_move_wide_16.S
+++ b/runtime/interpreter/mterp/arm/op_move_wide_16.S
@@ -3,9 +3,10 @@
FETCH r3, 2 @ r3<- BBBB
FETCH r2, 1 @ r2<- AAAA
add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ add lr, rFP, r2, lsl #2 @ lr<- &fp[AAAA]
ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
+ stmia lr, {r0-r1} @ fp[AAAA]<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/op_move_wide_from16.S b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
index f2ae785032..b65259db50 100644
--- a/runtime/interpreter/mterp/arm/op_move_wide_from16.S
+++ b/runtime/interpreter/mterp/arm/op_move_wide_from16.S
@@ -1,10 +1,11 @@
/* move-wide/from16 vAA, vBBBB */
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
FETCH r3, 1 @ r3<- BBBB
- mov r2, rINST, lsr #8 @ r2<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA]
ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r2, {r0-r1} @ fp[AA]<- r0/r1
diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S
index 97db05f596..3a5090866a 100644
--- a/runtime/interpreter/mterp/arm/op_sget_wide.S
+++ b/runtime/interpreter/mterp/arm/op_sget_wide.S
@@ -12,10 +12,11 @@
bl artGet64StaticFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add lr, rFP, r9, lsl #2 @ lr<- &fp[AA]
cmp r3, #0 @ Fail to resolve?
bne MterpException @ bail out
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ CLEAR_SHADOW_PAIR r9, r2, ip @ Zero out the shadow regs
+ stmia lr, {r0-r1} @ vAA/vAA+1<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/unopWide.S b/runtime/interpreter/mterp/arm/unopWide.S
index 7b8739cb92..a07423468d 100644
--- a/runtime/interpreter/mterp/arm/unopWide.S
+++ b/runtime/interpreter/mterp/arm/unopWide.S
@@ -8,10 +8,11 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ r0/r1<- op, r2-r3 changed
diff --git a/runtime/interpreter/mterp/arm/unopWider.S b/runtime/interpreter/mterp/arm/unopWider.S
index 657a3956e5..23b6b9d2f5 100644
--- a/runtime/interpreter/mterp/arm/unopWider.S
+++ b/runtime/interpreter/mterp/arm/unopWider.S
@@ -8,10 +8,11 @@
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
$preinstr @ optional op; may set condition codes
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/config_x86 b/runtime/interpreter/mterp/config_x86
index 5fab379f8d..f1501e11d4 100644
--- a/runtime/interpreter/mterp/config_x86
+++ b/runtime/interpreter/mterp/config_x86
@@ -19,6 +19,10 @@
handler-style computed-goto
handler-size 128
+function-type-format FUNCTION_TYPE(%s)
+function-size-format SIZE(%s,%s)
+global-name-format SYMBOL(%s)
+
# source for alternate entry stub
asm-alt-stub x86/alt_stub.S
diff --git a/runtime/interpreter/mterp/gen_mterp.py b/runtime/interpreter/mterp/gen_mterp.py
index f56d8bddaf..5839b5fc97 100755
--- a/runtime/interpreter/mterp/gen_mterp.py
+++ b/runtime/interpreter/mterp/gen_mterp.py
@@ -41,6 +41,9 @@ label_prefix = ".L" # use ".L" to hide labels from gdb
alt_label_prefix = ".L_ALT" # use ".L" to hide labels from gdb
style = None # interpreter style
generate_alt_table = False
+function_type_format = ".type %s, %%function"
+function_size_format = ".size %s, .-%s"
+global_name_format = "%s"
# Exception class.
class DataParseError(SyntaxError):
@@ -147,7 +150,24 @@ def setAsmAltStub(tokens):
raise DataParseError("import requires one argument")
default_alt_stub = tokens[1]
generate_alt_table = True
-
+#
+# Change the default function type format
+#
+def setFunctionTypeFormat(tokens):
+ global function_type_format
+ function_type_format = tokens[1]
+#
+# Change the default function size format
+#
+def setFunctionSizeFormat(tokens):
+ global function_size_format
+ function_size_format = tokens[1]
+#
+# Change the global name format
+#
+def setGlobalNameFormat(tokens):
+ global global_name_format
+ global_name_format = tokens[1]
#
# Parse arch config file --
# Start of opcode list.
@@ -259,12 +279,12 @@ def loadAndEmitOpcodes():
sister_list = []
assert len(opcodes) == kNumPackedOpcodes
need_dummy_start = False
- start_label = "artMterpAsmInstructionStart"
- end_label = "artMterpAsmInstructionEnd"
+ start_label = global_name_format % "artMterpAsmInstructionStart"
+ end_label = global_name_format % "artMterpAsmInstructionEnd"
# point MterpAsmInstructionStart at the first handler or stub
asm_fp.write("\n .global %s\n" % start_label)
- asm_fp.write(" .type %s, %%function\n" % start_label)
+ asm_fp.write(" " + (function_type_format % start_label) + "\n");
asm_fp.write("%s = " % start_label + label_prefix + "_op_nop\n")
asm_fp.write(" .text\n\n")
@@ -290,21 +310,23 @@ def loadAndEmitOpcodes():
asm_fp.write(label_prefix + "_op_nop: /* dummy */\n");
emitAlign()
- asm_fp.write(" .size %s, .-%s\n" % (start_label, start_label))
+ asm_fp.write(" " + (function_size_format % (start_label, start_label)) + "\n")
asm_fp.write(" .global %s\n" % end_label)
asm_fp.write("%s:\n" % end_label)
if style == "computed-goto":
+ start_sister_label = global_name_format % "artMterpAsmSisterStart"
+ end_sister_label = global_name_format % "artMterpAsmSisterEnd"
emitSectionComment("Sister implementations", asm_fp)
- asm_fp.write(" .global artMterpAsmSisterStart\n")
- asm_fp.write(" .type artMterpAsmSisterStart, %function\n")
+ asm_fp.write(" .global %s\n" % start_sister_label)
+ asm_fp.write(" " + (function_type_format % start_sister_label) + "\n");
asm_fp.write(" .text\n")
asm_fp.write(" .balign 4\n")
- asm_fp.write("artMterpAsmSisterStart:\n")
+ asm_fp.write("%s:\n" % start_sister_label)
asm_fp.writelines(sister_list)
- asm_fp.write("\n .size artMterpAsmSisterStart, .-artMterpAsmSisterStart\n")
- asm_fp.write(" .global artMterpAsmSisterEnd\n")
- asm_fp.write("artMterpAsmSisterEnd:\n\n")
+ asm_fp.write("\n " + (function_size_format % (start_sister_label, start_sister_label)) + "\n")
+ asm_fp.write(" .global %s\n" % end_sister_label)
+ asm_fp.write("%s:\n\n" % end_sister_label)
#
# Load an alternate entry stub
@@ -324,12 +346,12 @@ def loadAndEmitAltStub(source, opindex):
#
def loadAndEmitAltOpcodes():
assert len(opcodes) == kNumPackedOpcodes
- start_label = "artMterpAsmAltInstructionStart"
- end_label = "artMterpAsmAltInstructionEnd"
+ start_label = global_name_format % "artMterpAsmAltInstructionStart"
+ end_label = global_name_format % "artMterpAsmAltInstructionEnd"
# point MterpAsmInstructionStart at the first handler or stub
asm_fp.write("\n .global %s\n" % start_label)
- asm_fp.write(" .type %s, %%function\n" % start_label)
+ asm_fp.write(" " + (function_type_format % start_label) + "\n");
asm_fp.write(" .text\n\n")
asm_fp.write("%s = " % start_label + label_prefix + "_ALT_op_nop\n")
@@ -342,7 +364,7 @@ def loadAndEmitAltOpcodes():
loadAndEmitAltStub(source, i)
emitAlign()
- asm_fp.write(" .size %s, .-%s\n" % (start_label, start_label))
+ asm_fp.write(" " + (function_size_format % (start_label, start_label)) + "\n")
asm_fp.write(" .global %s\n" % end_label)
asm_fp.write("%s:\n" % end_label)
@@ -579,6 +601,12 @@ try:
splitops = True
elif tokens[0] == "fallback-stub":
setFallbackStub(tokens)
+ elif tokens[0] == "function-type-format":
+ setFunctionTypeFormat(tokens)
+ elif tokens[0] == "function-size-format":
+ setFunctionSizeFormat(tokens)
+ elif tokens[0] == "global-name-format":
+ setGlobalNameFormat(tokens)
else:
raise DataParseError, "unrecognized command '%s'" % tokens[0]
if style == None:
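
The three new directives are plain %s format strings; with the config_x86 settings above, the emitted label and ".type" lines become macro-style wrappers. A minimal sketch of the same substitution (the label is the script's default; only the format strings come from config_x86):

    #include <cstdio>

    // Sketch of the substitution gen_mterp.py performs with the x86 config.
    int main() {
      const char* global_name_format = "SYMBOL(%s)";            // from config_x86
      const char* function_type_format = "FUNCTION_TYPE(%s)";   // from config_x86
      char label[64];
      char line[96];
      std::snprintf(label, sizeof(label), global_name_format, "artMterpAsmInstructionStart");
      std::snprintf(line, sizeof(line), function_type_format, label);
      std::puts(label);  // SYMBOL(artMterpAsmInstructionStart)
      std::puts(line);   // FUNCTION_TYPE(SYMBOL(artMterpAsmInstructionStart))
      return 0;
    }
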
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index 78c784b773..ee195598db 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -270,6 +270,19 @@ unspecified registers or condition codes.
str \reg, [rFP, \vreg, lsl #2]
str \reg, [rREFS, \vreg, lsl #2]
.endm
+.macro SET_VREG_SHADOW reg, vreg
+ str \reg, [rREFS, \vreg, lsl #2]
+.endm
+
+/*
+ * Clear the corresponding shadow regs for a vreg pair
+ */
+.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
+ mov \tmp1, #0
+ add \tmp2, \vreg, #1
+ SET_VREG_SHADOW \tmp1, \vreg
+ SET_VREG_SHADOW \tmp1, \tmp2
+.endm
/*
* Convert a virtual register index into an address.
@@ -426,10 +439,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* move-wide vA, vB */
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r2, rINST, #8, #4 @ r2<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ add r2, rFP, rINST, lsl #2 @ r2<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r2, {r0-r1} @ fp[A]<- r0/r1
@@ -442,10 +456,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* move-wide/from16 vAA, vBBBB */
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
FETCH r3, 1 @ r3<- BBBB
- mov r2, rINST, lsr #8 @ r2<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA]
ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r2, {r0-r1} @ fp[AA]<- r0/r1
@@ -460,10 +475,11 @@ artMterpAsmInstructionStart = .L_op_nop
FETCH r3, 2 @ r3<- BBBB
FETCH r2, 1 @ r2<- AAAA
add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ add lr, rFP, r2, lsl #2 @ lr<- &fp[AAAA]
ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
- stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
+ stmia lr, {r0-r1} @ fp[AAAA]<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
@@ -550,10 +566,11 @@ artMterpAsmInstructionStart = .L_op_nop
.L_op_move_result_wide: /* 0x0b */
/* File: arm/op_move_result_wide.S */
/* move-result-wide vAA */
- mov r2, rINST, lsr #8 @ r2<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
ldr r3, [rFP, #OFF_FP_RESULT_REGISTER]
- add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA]
ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
stmia r2, {r0-r1} @ fp[AA]<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -731,6 +748,7 @@ artMterpAsmInstructionStart = .L_op_nop
mov r3, rINST, lsr #8 @ r3<- AA
mov r1, r0, asr #31 @ r1<- ssssssss
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
@@ -746,6 +764,7 @@ artMterpAsmInstructionStart = .L_op_nop
FETCH_S r2, 2 @ r2<- ssssBBBB (high)
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
mov r1, r0, asr #31 @ r1<- ssssssss
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -764,6 +783,7 @@ artMterpAsmInstructionStart = .L_op_nop
FETCH r3, 4 @ r3<- HHHH (high)
mov r9, rINST, lsr #8 @ r9<- AA
orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -780,6 +800,7 @@ artMterpAsmInstructionStart = .L_op_nop
mov r0, #0 @ r0<- 00000000
mov r1, r1, lsl #16 @ r1<- BBBB0000
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
+ CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
@@ -2068,6 +2089,7 @@ artMterpAsmInstructionStart = .L_op_nop
mov r3, r0, lsr #8 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
+ CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
@@ -2519,8 +2541,9 @@ artMterpAsmInstructionStart = .L_op_nop
PREFETCH_INST 2
cmp r3, #0
bne MterpException @ bail out
- add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
- stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
ADVANCE 2
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
@@ -2909,11 +2932,12 @@ artMterpAsmInstructionStart = .L_op_nop
bl artGet64StaticFromCode
ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET]
mov r9, rINST, lsr #8 @ r9<- AA
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add lr, rFP, r9, lsl #2 @ lr<- &fp[AA]
cmp r3, #0 @ Fail to resolve?
bne MterpException @ bail out
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ CLEAR_SHADOW_PAIR r9, r2, ip @ Zero out the shadow regs
+ stmia lr, {r0-r1} @ vAA/vAA+1<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
@@ -3622,10 +3646,11 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
rsbs r0, r0, #0 @ optional op; may set condition codes
rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
@@ -3649,10 +3674,11 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
mvn r0, r0 @ optional op; may set condition codes
mvn r1, r1 @ r0/r1<- op, r2-r3 changed
@@ -3702,10 +3728,11 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@ optional op; may set condition codes
add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
@@ -3729,10 +3756,11 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
@ optional op; may set condition codes
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -3785,6 +3813,7 @@ artMterpAsmInstructionStart = .L_op_nop
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
and r9, r9, #15 @ r9<- A
fsitod d0, s0 @ d0<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
fstd d0, [r9] @ vA<- d0
@@ -3912,10 +3941,11 @@ constvalop_long_to_double:
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
GET_VREG r0, r3 @ r0<- vB
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
@ optional op; may set condition codes
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
bl f2l_doconv @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -3944,6 +3974,7 @@ constvalop_long_to_double:
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
and r9, r9, #15 @ r9<- A
fcvtds d0, s0 @ d0<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
fstd d0, [r9] @ vA<- d0
@@ -3990,10 +4021,11 @@ constvalop_long_to_double:
*/
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r3, {r0-r1} @ r0/r1<- vAA
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
@ optional op; may set condition codes
bl d2l_doconv @ r0/r1<- op, r2-r3 changed
@@ -4570,10 +4602,10 @@ constvalop_long_to_double:
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
@@ -4582,8 +4614,8 @@ constvalop_long_to_double:
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
adds r0, r0, r2 @ optional op; may set condition codes
adc r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -4614,10 +4646,10 @@ constvalop_long_to_double:
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
@@ -4626,8 +4658,8 @@ constvalop_long_to_double:
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
subs r0, r0, r2 @ optional op; may set condition codes
sbc r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -4699,10 +4731,10 @@ constvalop_long_to_double:
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
@@ -4711,8 +4743,8 @@ constvalop_long_to_double:
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
@ optional op; may set condition codes
bl __aeabi_ldivmod @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -4744,10 +4776,10 @@ constvalop_long_to_double:
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
@@ -4756,8 +4788,8 @@ constvalop_long_to_double:
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
@ optional op; may set condition codes
bl __aeabi_ldivmod @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -4788,10 +4820,10 @@ constvalop_long_to_double:
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
@@ -4800,8 +4832,8 @@ constvalop_long_to_double:
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
and r0, r0, r2 @ optional op; may set condition codes
and r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -4832,10 +4864,10 @@ constvalop_long_to_double:
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
@@ -4844,8 +4876,8 @@ constvalop_long_to_double:
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
orr r0, r0, r2 @ optional op; may set condition codes
orr r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -4876,10 +4908,10 @@ constvalop_long_to_double:
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
@@ -4888,8 +4920,8 @@ constvalop_long_to_double:
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
eor r0, r0, r2 @ optional op; may set condition codes
eor r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -5177,9 +5209,9 @@ constvalop_long_to_double:
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
fldd d1, [r3] @ d1<- vCC
fldd d0, [r2] @ d0<- vBB
-
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
faddd d2, d0, d1 @ s2<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
fstd d2, [r9] @ vAA<- d2
@@ -5207,9 +5239,9 @@ constvalop_long_to_double:
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
fldd d1, [r3] @ d1<- vCC
fldd d0, [r2] @ d0<- vBB
-
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
fsubd d2, d0, d1 @ s2<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
fstd d2, [r9] @ vAA<- d2
@@ -5237,9 +5269,9 @@ constvalop_long_to_double:
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
fldd d1, [r3] @ d1<- vCC
fldd d0, [r2] @ d0<- vBB
-
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
fmuld d2, d0, d1 @ s2<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
fstd d2, [r9] @ vAA<- d2
@@ -5267,9 +5299,9 @@ constvalop_long_to_double:
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
fldd d1, [r3] @ d1<- vCC
fldd d0, [r2] @ d0<- vBB
-
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
fdivd d2, d0, d1 @ s2<- op
+ CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
fstd d2, [r9] @ vAA<- d2
@@ -5299,10 +5331,10 @@ constvalop_long_to_double:
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
- mov r9, rINST, lsr #8 @ r9<- AA
+ mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
- add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA]
add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
@@ -5311,8 +5343,8 @@ constvalop_long_to_double:
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
-
@ optional op; may set condition codes
bl fmod @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -5754,17 +5786,17 @@ constvalop_long_to_double:
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
adds r0, r0, r2 @ optional op; may set condition codes
adc r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -5794,17 +5826,17 @@ constvalop_long_to_double:
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
subs r0, r0, r2 @ optional op; may set condition codes
sbc r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -5863,17 +5895,17 @@ constvalop_long_to_double:
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 1
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
@ optional op; may set condition codes
bl __aeabi_ldivmod @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -5904,17 +5936,17 @@ constvalop_long_to_double:
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 1
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
@ optional op; may set condition codes
bl __aeabi_ldivmod @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -5944,17 +5976,17 @@ constvalop_long_to_double:
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
and r0, r0, r2 @ optional op; may set condition codes
and r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -5984,17 +6016,17 @@ constvalop_long_to_double:
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
orr r0, r0, r2 @ optional op; may set condition codes
orr r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -6024,17 +6056,17 @@ constvalop_long_to_double:
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
eor r0, r0, r2 @ optional op; may set condition codes
eor r1, r1, r3 @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -6294,10 +6326,10 @@ constvalop_long_to_double:
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
and r9, r9, #15 @ r9<- A
fldd d1, [r3] @ d1<- vB
+ CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
-
faddd d2, d0, d1 @ d2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fstd d2, [r9] @ vAA<- d2
@@ -6323,10 +6355,10 @@ constvalop_long_to_double:
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
and r9, r9, #15 @ r9<- A
fldd d1, [r3] @ d1<- vB
+ CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
-
fsubd d2, d0, d1 @ d2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fstd d2, [r9] @ vAA<- d2
@@ -6352,10 +6384,10 @@ constvalop_long_to_double:
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
and r9, r9, #15 @ r9<- A
fldd d1, [r3] @ d1<- vB
+ CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
-
fmuld d2, d0, d1 @ d2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fstd d2, [r9] @ vAA<- d2
@@ -6381,10 +6413,10 @@ constvalop_long_to_double:
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
and r9, r9, #15 @ r9<- A
fldd d1, [r3] @ d1<- vB
+ CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
-
fdivd d2, d0, d1 @ d2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
fstd d2, [r9] @ vAA<- d2
@@ -6413,17 +6445,17 @@ constvalop_long_to_double:
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
- ubfx r9, rINST, #8, #4 @ r9<- A
+ ubfx rINST, rINST, #8, #4 @ rINST<- A
add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
- add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ add r9, rFP, rINST, lsl #2 @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
.if 0
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
+ CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
-
@ optional op; may set condition codes
bl fmod @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
@@ -7155,6 +7187,7 @@ constvalop_long_to_double:
ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ fp[A]<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S
index e2918dcfb2..96229ceba0 100644
--- a/runtime/interpreter/mterp/out/mterp_x86.S
+++ b/runtime/interpreter/mterp/out/mterp_x86.S
@@ -96,6 +96,22 @@ unspecified registers or condition codes.
*/
#include "asm_support.h"
+/*
+ * Handle Mac compiler specifics
+ */
+#if defined(__APPLE__)
+ #define MACRO_LITERAL(value) $(value)
+ #define FUNCTION_TYPE(name)
+ #define SIZE(start,end)
+ // Mac OS' symbols have an _ prefix.
+ #define SYMBOL(name) _ ## name
+#else
+ #define MACRO_LITERAL(value) $value
+ #define FUNCTION_TYPE(name) .type name, @function
+ #define SIZE(start,end) .size start, .-end
+ #define SYMBOL(name) name
+#endif
+
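/*
 * Illustrative use only, not part of this patch ("MyHelper" is a made-up
 * name): with the macros above, a routine can be defined once and still
 * assemble both on Mac (leading-underscore symbols, no .type/.size) and on
 * ELF targets; call sites wrap the name the same way, e.g.
 * call SYMBOL(MyHelper).
 */
    .global SYMBOL(MyHelper)
    FUNCTION_TYPE(SYMBOL(MyHelper))
SYMBOL(MyHelper):
    movl    MACRO_LITERAL(0), %eax      # return 0
    ret
    SIZE(MyHelper, MyHelper)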
/* Frame size must be 16-byte aligned.
* Remember about 4 bytes for return address
*/
@@ -199,7 +215,7 @@ unspecified registers or condition codes.
*/
.macro REFRESH_INST _opnum
movb rINSTbl, rINSTbh
- movb $\_opnum, rINSTbl
+ movb MACRO_LITERAL(\_opnum), rINSTbl
.endm
/*
@@ -215,7 +231,7 @@ unspecified registers or condition codes.
.macro GOTO_NEXT
movzx rINSTbl,%eax
movzbl rINSTbh,rINST
- shll $7, %eax
+ shll MACRO_LITERAL(7), %eax
addl rIBASE, %eax
jmp *%eax
.endm
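/*
 * Dispatch arithmetic implied by the shll above (handler layout inferred
 * from the shift, not spelled out in this diff): each opcode handler
 * occupies a fixed 2^7 = 128-byte slot, so GOTO_NEXT jumps to
 *     target = rIBASE + opcode * 128
 * e.g. opcode 0x0b (move-result-wide) lands at rIBASE + 0x580.
 */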
@@ -255,7 +271,7 @@ unspecified registers or condition codes.
.macro SET_VREG _reg _vreg
movl \_reg, (rFP,\_vreg,4)
- movl $0, (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
.endm
/* Write wide value from xmm. xmm is clobbered. */
@@ -276,16 +292,16 @@ unspecified registers or condition codes.
.macro SET_VREG_HIGH _reg _vreg
movl \_reg, 4(rFP,\_vreg,4)
- movl $0, 4(rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm
.macro CLEAR_REF _vreg
- movl $0, (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
.endm
.macro CLEAR_WIDE_REF _vreg
- movl $0, (rREFS,\_vreg,4)
- movl $0, 4(rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm
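/*
 * Usage sketch, not taken from the patch: with the macros above, storing a
 * 32-bit result also zeroes the matching reference-shadow slot, and the
 * comma-separated argument form matches the call sites rewritten in the
 * hunks that follow.
 */
    movl    MACRO_LITERAL(7), %eax      # eax<- example value
    SET_VREG %eax, rINST                # vAA<- eax, rREFS[AA]<- 0
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1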
/* File: x86/entry.S */
@@ -309,8 +325,8 @@ unspecified registers or condition codes.
*/
.text
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
+ .global SYMBOL(ExecuteMterpImpl)
+ FUNCTION_TYPE(ExecuteMterpImpl)
/*
* On entry:
@@ -321,7 +337,7 @@ unspecified registers or condition codes.
*
*/
-ExecuteMterpImpl:
+SYMBOL(ExecuteMterpImpl):
.cfi_startproc
/* Allocate frame */
subl $FRAME_SIZE, %esp
@@ -362,9 +378,9 @@ ExecuteMterpImpl:
/* NOTE: no fallthrough */
- .global artMterpAsmInstructionStart
- .type artMterpAsmInstructionStart, %function
-artMterpAsmInstructionStart = .L_op_nop
+ .global SYMBOL(artMterpAsmInstructionStart)
+ FUNCTION_TYPE(SYMBOL(artMterpAsmInstructionStart))
+SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
.text
/* ------------------------------ */
@@ -382,11 +398,11 @@ artMterpAsmInstructionStart = .L_op_nop
movzbl rINSTbl, %eax # eax <- BA
andb $0xf, %al # eax <- A
shrl $4, rINST # rINST <- B
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
.if 0
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -398,11 +414,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBBBB */
movzx rINSTbl, %eax # eax <- AA
movw 2(rPC), rINSTw # rINSTw <- BBBB
- GET_VREG rINST rINST # rINST <- fp[BBBB]
+ GET_VREG rINST, rINST # rINST <- fp[BBBB]
.if 0
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -414,11 +430,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAAAA, vBBBB */
movzwl 4(rPC), %ecx # ecx <- BBBB
movzwl 2(rPC), %eax # eax <- AAAA
- GET_VREG rINST %ecx
+ GET_VREG rINST, %ecx
.if 0
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
@@ -431,8 +447,8 @@ artMterpAsmInstructionStart = .L_op_nop
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, rINST # rINST <- B
andb $0xf, %cl # ecx <- A
- GET_WIDE_FP_VREG %xmm0 rINST # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0 %ecx # v[A] <- xmm0
+ GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- v[B]
+ SET_WIDE_FP_VREG %xmm0, %ecx # v[A] <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
/* ------------------------------ */
@@ -443,8 +459,8 @@ artMterpAsmInstructionStart = .L_op_nop
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
movzwl 2(rPC), %ecx # ecx <- BBBB
movzbl rINSTbl, %eax # eax <- AAAA
- GET_WIDE_FP_VREG %xmm0 %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0 %eax # v[A] <- xmm0
+ GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
+ SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -455,8 +471,8 @@ artMterpAsmInstructionStart = .L_op_nop
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
movzwl 4(rPC), %ecx # ecx<- BBBB
movzwl 2(rPC), %eax # eax<- AAAA
- GET_WIDE_FP_VREG %xmm0 %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0 %eax # v[A] <- xmm0
+ GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
+ SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
/* ------------------------------ */
@@ -469,11 +485,11 @@ artMterpAsmInstructionStart = .L_op_nop
movzbl rINSTbl, %eax # eax <- BA
andb $0xf, %al # eax <- A
shrl $4, rINST # rINST <- B
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
.if 1
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -487,11 +503,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBBBB */
movzx rINSTbl, %eax # eax <- AA
movw 2(rPC), rINSTw # rINSTw <- BBBB
- GET_VREG rINST rINST # rINST <- fp[BBBB]
+ GET_VREG rINST, rINST # rINST <- fp[BBBB]
.if 1
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -505,11 +521,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAAAA, vBBBB */
movzwl 4(rPC), %ecx # ecx <- BBBB
movzwl 2(rPC), %eax # eax <- AAAA
- GET_VREG rINST %ecx
+ GET_VREG rINST, %ecx
.if 1
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
@@ -523,9 +539,9 @@ artMterpAsmInstructionStart = .L_op_nop
movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
movl (%eax), %eax # r0 <- result.i.
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <- fp[B]
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- fp[B]
.else
- SET_VREG %eax rINST # fp[A] <- fp[B]
+ SET_VREG %eax, rINST # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -537,8 +553,8 @@ artMterpAsmInstructionStart = .L_op_nop
movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
movl 4(%eax), %ecx # Get high
movl (%eax), %eax # Get low
- SET_VREG %eax rINST # v[AA+0] <- eax
- SET_VREG_HIGH %ecx rINST # v[AA+1] <- ecx
+ SET_VREG %eax, rINST # v[AA+0] <- eax
+ SET_VREG_HIGH %ecx, rINST # v[AA+1] <- ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
/* ------------------------------ */
@@ -551,9 +567,9 @@ artMterpAsmInstructionStart = .L_op_nop
movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
movl (%eax), %eax # r0 <- result.i.
.if 1
- SET_VREG_OBJECT %eax rINST # fp[A] <- fp[B]
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- fp[B]
.else
- SET_VREG %eax rINST # fp[A] <- fp[B]
+ SET_VREG %eax, rINST # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -565,7 +581,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* move-exception vAA */
movl rSELF, %ecx
movl THREAD_EXCEPTION_OFFSET(%ecx), %eax
- SET_VREG_OBJECT %eax rINST # fp[AA] <- exception object
+ SET_VREG_OBJECT %eax, rINST # fp[AA] <- exception object
movl $0, THREAD_EXCEPTION_OFFSET(%ecx)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -574,12 +590,12 @@ artMterpAsmInstructionStart = .L_op_nop
.L_op_return_void: /* 0x0e */
/* File: x86/op_return_void.S */
.extern MterpThreadFenceForConstructor
- call MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
1:
xorl %eax, %eax
xorl %ecx, %ecx
@@ -596,14 +612,14 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA */
.extern MterpThreadFenceForConstructor
- call MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
1:
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
xorl %ecx, %ecx
jmp MterpReturn
@@ -616,15 +632,15 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* return-wide vAA */
.extern MterpThreadFenceForConstructor
- call MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
1:
- GET_VREG %eax rINST # eax <- v[AA+0]
- GET_VREG_HIGH %ecx rINST # ecx <- v[AA+1]
+ GET_VREG %eax, rINST # eax <- v[AA+0]
+ GET_VREG_HIGH %ecx, rINST # ecx <- v[AA+1]
jmp MterpReturn
/* ------------------------------ */
@@ -639,14 +655,14 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA */
.extern MterpThreadFenceForConstructor
- call MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
1:
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
xorl %ecx, %ecx
jmp MterpReturn
@@ -660,7 +676,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl $0xf, rINST
andl %eax, rINST # rINST <- A
sarl $4, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
/* ------------------------------ */
@@ -669,7 +685,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* File: x86/op_const_16.S */
/* const/16 vAA, #+BBBB */
movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx rINST # vAA <- ssssBBBB
+ SET_VREG %ecx, rINST # vAA <- ssssBBBB
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -678,7 +694,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* File: x86/op_const.S */
/* const vAA, #+BBBBbbbb */
movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax rINST # vAA<- eax
+ SET_VREG %eax, rINST # vAA<- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
/* ------------------------------ */
@@ -688,7 +704,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* const/high16 vAA, #+BBBB0000 */
movzwl 2(rPC), %eax # eax <- 0000BBBB
sall $16, %eax # eax <- BBBB0000
- SET_VREG %eax rINST # vAA <- eax
+ SET_VREG %eax, rINST # vAA <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -699,8 +715,8 @@ artMterpAsmInstructionStart = .L_op_nop
movswl 2(rPC), %eax # eax <- ssssBBBB
movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE rINST # store msw
- SET_VREG %eax rINST # store lsw
+ SET_VREG_HIGH rIBASE, rINST # store msw
+ SET_VREG %eax, rINST # store lsw
movl %ecx, rIBASE # restore rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -712,8 +728,8 @@ artMterpAsmInstructionStart = .L_op_nop
movl 2(rPC), %eax # eax <- BBBBbbbb
movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE rINST # store msw
- SET_VREG %eax rINST # store lsw
+ SET_VREG_HIGH rIBASE, rINST # store msw
+ SET_VREG %eax, rINST # store lsw
movl %ecx, rIBASE # restore rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
@@ -725,8 +741,8 @@ artMterpAsmInstructionStart = .L_op_nop
movl 2(rPC), %eax # eax <- lsw
movzbl rINSTbl, %ecx # ecx <- AA
movl 6(rPC), rINST # rINST <- msw
- SET_VREG %eax %ecx
- SET_VREG_HIGH rINST %ecx
+ SET_VREG %eax, %ecx
+ SET_VREG_HIGH rINST, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
/* ------------------------------ */
@@ -736,9 +752,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* const-wide/high16 vAA, #+BBBB000000000000 */
movzwl 2(rPC), %eax # eax <- 0000BBBB
sall $16, %eax # eax <- BBBB0000
- SET_VREG_HIGH %eax rINST # v[AA+1] <- eax
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
xorl %eax, %eax
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -754,7 +770,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call MterpConstString # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpPossibleException
@@ -773,7 +789,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call MterpConstString # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpPossibleException
@@ -792,7 +808,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call MterpConstClass # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpPossibleException
@@ -807,11 +823,11 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* monitor-enter vAA */
EXPORT_PC
- GET_VREG %ecx rINST
+ GET_VREG %ecx, rINST
movl %ecx, OUT_ARG0(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG1(%esp)
- call artLockObjectFromCode # (object, self)
+ call SYMBOL(artLockObjectFromCode) # (object, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpException
@@ -830,11 +846,11 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* monitor-exit vAA */
EXPORT_PC
- GET_VREG %ecx rINST
+ GET_VREG %ecx, rINST
movl %ecx, OUT_ARG0(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG1(%esp)
- call artUnlockObjectFromCode # (object, self)
+ call SYMBOL(artUnlockObjectFromCode) # (object, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpException
@@ -857,7 +873,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call MterpCheckCast # (index, &obj, method, self)
+ call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpPossibleException
@@ -885,13 +901,13 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call MterpInstanceOf # (index, &obj, method, self)
+ call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
andb $0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -903,12 +919,12 @@ artMterpAsmInstructionStart = .L_op_nop
*/
mov rINST, %eax # eax <- BA
sarl $4, rINST # rINST <- B
- GET_VREG %ecx rINST # ecx <- vB (object ref)
+ GET_VREG %ecx, rINST # ecx <- vB (object ref)
testl %ecx, %ecx # is null?
je common_errNullObject
andb $0xf, %al # eax <- A
movl MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
- SET_VREG rINST %eax
+ SET_VREG rINST, %eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
/* ------------------------------ */
@@ -926,7 +942,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %ecx, OUT_ARG1(%esp)
REFRESH_INST 34
movl rINST, OUT_ARG2(%esp)
- call MterpNewInstance
+ call SYMBOL(MterpNewInstance)
REFRESH_IBASE
testl %eax, %eax # 0 means an exception is thrown
jz MterpPossibleException
@@ -952,7 +968,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rINST, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call MterpNewArray
+ call SYMBOL(MterpNewArray)
REFRESH_IBASE
testl %eax, %eax # 0 means an exception is thrown
jz MterpPossibleException
@@ -976,7 +992,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG1(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp)
- call MterpFilledNewArray
+ call SYMBOL(MterpFilledNewArray)
REFRESH_IBASE
testl %eax, %eax # 0 means an exception is thrown
jz MterpPossibleException
@@ -1001,7 +1017,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG1(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp)
- call MterpFilledNewArrayRange
+ call SYMBOL(MterpFilledNewArrayRange)
REFRESH_IBASE
testl %eax, %eax # 0 means an exception is thrown
jz MterpPossibleException
@@ -1016,10 +1032,10 @@ artMterpAsmInstructionStart = .L_op_nop
EXPORT_PC
movl 2(rPC), %ecx # ecx <- BBBBbbbb
leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- GET_VREG %eax rINST # eax <- vAA (array object)
+ GET_VREG %eax, rINST # eax <- vAA (array object)
movl %eax, OUT_ARG0(%esp)
movl %ecx, OUT_ARG1(%esp)
- call MterpFillArrayData # (obj, payload)
+ call SYMBOL(MterpFillArrayData) # (obj, payload)
REFRESH_IBASE
testl %eax, %eax # 0 means an exception is thrown
jz MterpPossibleException
@@ -1034,7 +1050,7 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* throw vAA */
EXPORT_PC
- GET_VREG %eax rINST # eax<- vAA (exception object)
+ GET_VREG %eax, rINST # eax<- vAA (exception object)
testl %eax, %eax
jz common_errNullObject
movl rSELF,%ecx
@@ -1133,11 +1149,11 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, +BBBB */
movl 2(rPC), %ecx # ecx <- BBBBbbbb
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
- call MterpDoPackedSwitch
+ call SYMBOL(MterpDoPackedSwitch)
addl %eax, %eax
leal (rPC, %eax), rPC
FETCH_INST
@@ -1167,11 +1183,11 @@ artMterpAsmInstructionStart = .L_op_nop
*/
/* op vAA, +BBBB */
movl 2(rPC), %ecx # ecx <- BBBBbbbb
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
- call MterpDoSparseSwitch
+ call SYMBOL(MterpDoSparseSwitch)
addl %eax, %eax
leal (rPC, %eax), rPC
FETCH_INST
@@ -1223,7 +1239,7 @@ artMterpAsmInstructionStart = .L_op_nop
.Lop_cmpl_float_less:
decl %eax
.Lop_cmpl_float_finish:
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -1264,7 +1280,7 @@ artMterpAsmInstructionStart = .L_op_nop
.Lop_cmpg_float_less:
decl %eax
.Lop_cmpg_float_finish:
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -1305,7 +1321,7 @@ artMterpAsmInstructionStart = .L_op_nop
.Lop_cmpl_double_less:
decl %eax
.Lop_cmpl_double_finish:
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -1346,7 +1362,7 @@ artMterpAsmInstructionStart = .L_op_nop
.Lop_cmpg_double_less:
decl %eax
.Lop_cmpg_double_finish:
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -1361,17 +1377,17 @@ artMterpAsmInstructionStart = .L_op_nop
/* cmp-long vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG_HIGH %eax %eax # eax <- v[BB+1], BB is clobbered
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1], BB is clobbered
cmpl VREG_HIGH_ADDRESS(%ecx), %eax
jl .Lop_cmp_long_smaller
jg .Lop_cmp_long_bigger
movzbl 2(rPC), %eax # eax <- BB, restore BB
- GET_VREG %eax %eax # eax <- v[BB]
+ GET_VREG %eax, %eax # eax <- v[BB]
sub VREG_ADDRESS(%ecx), %eax
ja .Lop_cmp_long_bigger
jb .Lop_cmp_long_smaller
.Lop_cmp_long_finish:
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.Lop_cmp_long_bigger:
@@ -1397,7 +1413,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl, %ecx # ecx <- A+
andb $0xf, %cl # ecx <- A
- GET_VREG %eax %ecx # eax <- vA
+ GET_VREG %eax, %ecx # eax <- vA
sarl $4, rINST # rINST <- B
cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
movl $2, %eax # assume not taken
@@ -1432,7 +1448,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl, %ecx # ecx <- A+
andb $0xf, %cl # ecx <- A
- GET_VREG %eax %ecx # eax <- vA
+ GET_VREG %eax, %ecx # eax <- vA
sarl $4, rINST # rINST <- B
cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
movl $2, %eax # assume not taken
@@ -1467,7 +1483,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl, %ecx # ecx <- A+
andb $0xf, %cl # ecx <- A
- GET_VREG %eax %ecx # eax <- vA
+ GET_VREG %eax, %ecx # eax <- vA
sarl $4, rINST # rINST <- B
cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
movl $2, %eax # assume not taken
@@ -1502,7 +1518,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl, %ecx # ecx <- A+
andb $0xf, %cl # ecx <- A
- GET_VREG %eax %ecx # eax <- vA
+ GET_VREG %eax, %ecx # eax <- vA
sarl $4, rINST # rINST <- B
cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
movl $2, %eax # assume not taken
@@ -1537,7 +1553,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl, %ecx # ecx <- A+
andb $0xf, %cl # ecx <- A
- GET_VREG %eax %ecx # eax <- vA
+ GET_VREG %eax, %ecx # eax <- vA
sarl $4, rINST # rINST <- B
cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
movl $2, %eax # assume not taken
@@ -1572,7 +1588,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl, %ecx # ecx <- A+
andb $0xf, %cl # ecx <- A
- GET_VREG %eax %ecx # eax <- vA
+ GET_VREG %eax, %ecx # eax <- vA
sarl $4, rINST # rINST <- B
cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
movl $2, %eax # assume not taken
@@ -1857,14 +1873,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
movl MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -1877,15 +1893,15 @@ artMterpAsmInstructionStart = .L_op_nop
/* aget-wide vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
movq (%eax), %xmm0 # xmm0 <- vBB[vCC]
- SET_WIDE_FP_VREG %xmm0 rINST # vAA <- xmm0
+ SET_WIDE_FP_VREG %xmm0, rINST # vAA <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -1900,17 +1916,17 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecs <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
EXPORT_PC
movl %eax, OUT_ARG0(%esp)
movl %ecx, OUT_ARG1(%esp)
- call artAGetObjectFromMterp # (array, index)
+ call SYMBOL(artAGetObjectFromMterp) # (array, index)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
- SET_VREG_OBJECT %eax rINST
+ SET_VREG_OBJECT %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -1927,14 +1943,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
movzbl MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -1952,14 +1968,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
movsbl MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -1977,14 +1993,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
movzwl MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2002,14 +2018,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
movswl MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2026,14 +2042,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal MIRROR_INT_ARRAY_DATA_OFFSET(%eax,%ecx,4), %eax
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movl rINST, (%eax)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2048,14 +2064,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* aput-wide vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- GET_WIDE_FP_VREG %xmm0 rINST # xmm0 <- vAA
+ GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- vAA
movq %xmm0, (%eax) # vBB[vCC] <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2073,7 +2089,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG1(%esp)
REFRESH_INST 77
movl rINST, OUT_ARG2(%esp)
- call MterpAputObject # (array, index)
+ call SYMBOL(MterpAputObject) # (array, index)
REFRESH_IBASE
testl %eax, %eax
jz MterpPossibleException
@@ -2093,14 +2109,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movb rINSTbl, (%eax)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2119,14 +2135,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal MIRROR_BYTE_ARRAY_DATA_OFFSET(%eax,%ecx,1), %eax
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movb rINSTbl, (%eax)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2145,14 +2161,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal MIRROR_CHAR_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movw rINSTw, (%eax)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2171,14 +2187,14 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal MIRROR_SHORT_ARRAY_DATA_OFFSET(%eax,%ecx,2), %eax
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movw rINSTw, (%eax)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2203,16 +2219,16 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artGet32InstanceFromCode
+ call SYMBOL(artGet32InstanceFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $0xf, rINSTbl # rINST <- A
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <-value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <-value
.else
- SET_VREG %eax rINST # fp[A] <-value
+ SET_VREG %eax, rINST # fp[A] <-value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2236,13 +2252,13 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artGet64InstanceFromCode
+ call SYMBOL(artGet64InstanceFromCode)
mov rSELF, %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $0xf, rINSTbl # rINST <- A
- SET_VREG %eax rINST
- SET_VREG_HIGH %edx rINST
+ SET_VREG %eax, rINST
+ SET_VREG_HIGH %edx, rINST
REFRESH_IBASE_FROM_SELF %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2267,16 +2283,16 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artGetObjInstanceFromCode
+ call SYMBOL(artGetObjInstanceFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $0xf, rINSTbl # rINST <- A
.if 1
- SET_VREG_OBJECT %eax rINST # fp[A] <-value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <-value
.else
- SET_VREG %eax rINST # fp[A] <-value
+ SET_VREG %eax, rINST # fp[A] <-value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2302,16 +2318,16 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artGetBooleanInstanceFromCode
+ call SYMBOL(artGetBooleanInstanceFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $0xf, rINSTbl # rINST <- A
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <-value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <-value
.else
- SET_VREG %eax rINST # fp[A] <-value
+ SET_VREG %eax, rINST # fp[A] <-value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2337,16 +2353,16 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artGetByteInstanceFromCode
+ call SYMBOL(artGetByteInstanceFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $0xf, rINSTbl # rINST <- A
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <-value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <-value
.else
- SET_VREG %eax rINST # fp[A] <-value
+ SET_VREG %eax, rINST # fp[A] <-value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2372,16 +2388,16 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artGetCharInstanceFromCode
+ call SYMBOL(artGetCharInstanceFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $0xf, rINSTbl # rINST <- A
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <-value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <-value
.else
- SET_VREG %eax rINST # fp[A] <-value
+ SET_VREG %eax, rINST # fp[A] <-value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2407,16 +2423,16 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artGetShortInstanceFromCode
+ call SYMBOL(artGetShortInstanceFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $0xf, rINSTbl # rINST <- A
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <-value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <-value
.else
- SET_VREG %eax rINST # fp[A] <-value
+ SET_VREG %eax, rINST # fp[A] <-value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2444,7 +2460,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call artSet32InstanceFromMterp
+ call SYMBOL(artSet32InstanceFromMterp)
testl %eax, %eax
jnz MterpPossibleException
REFRESH_IBASE
@@ -2468,7 +2484,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # &fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call artSet64InstanceFromMterp
+ call SYMBOL(artSet64InstanceFromMterp)
testl %eax, %eax
jnz MterpPossibleException
REFRESH_IBASE
@@ -2486,7 +2502,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rINST, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call MterpIputObject
+ call SYMBOL(MterpIputObject)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -2516,7 +2532,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call artSet8InstanceFromMterp
+ call SYMBOL(artSet8InstanceFromMterp)
testl %eax, %eax
jnz MterpPossibleException
REFRESH_IBASE
@@ -2547,7 +2563,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call artSet8InstanceFromMterp
+ call SYMBOL(artSet8InstanceFromMterp)
testl %eax, %eax
jnz MterpPossibleException
REFRESH_IBASE
@@ -2578,7 +2594,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call artSet16InstanceFromMterp
+ call SYMBOL(artSet16InstanceFromMterp)
testl %eax, %eax
jnz MterpPossibleException
REFRESH_IBASE
@@ -2609,7 +2625,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call artSet16InstanceFromMterp
+ call SYMBOL(artSet16InstanceFromMterp)
testl %eax, %eax
jnz MterpPossibleException
REFRESH_IBASE
@@ -2634,15 +2650,15 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call artGet32StaticFromCode
+ call SYMBOL(artGet32StaticFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <- value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
.else
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2663,12 +2679,12 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call artGet64StaticFromCode
+ call SYMBOL(artGet64StaticFromCode)
movl rSELF, %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
- SET_VREG %eax rINST # fp[A]<- low part
- SET_VREG_HIGH %edx rINST # fp[A+1]<- high part
+ SET_VREG %eax, rINST # fp[A]<- low part
+ SET_VREG_HIGH %edx, rINST # fp[A+1]<- high part
REFRESH_IBASE_FROM_SELF %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2691,15 +2707,15 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call artGetObjStaticFromCode
+ call SYMBOL(artGetObjStaticFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
.if 1
- SET_VREG_OBJECT %eax rINST # fp[A] <- value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
.else
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2723,15 +2739,15 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call artGetBooleanStaticFromCode
+ call SYMBOL(artGetBooleanStaticFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <- value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
.else
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2755,15 +2771,15 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call artGetByteStaticFromCode
+ call SYMBOL(artGetByteStaticFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <- value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
.else
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2787,15 +2803,15 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call artGetCharStaticFromCode
+ call SYMBOL(artGetCharStaticFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <- value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
.else
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2819,15 +2835,15 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call artGetShortStaticFromCode
+ call SYMBOL(artGetShortStaticFromCode)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
.if 0
- SET_VREG_OBJECT %eax rINST # fp[A] <- value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
.else
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -2846,13 +2862,13 @@ artMterpAsmInstructionStart = .L_op_nop
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movl rINST, OUT_ARG1(%esp) # fp[AA]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artSet32StaticFromCode
+ call SYMBOL(artSet32StaticFromCode)
testl %eax, %eax
jnz MterpException
REFRESH_IBASE
@@ -2877,7 +2893,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, OUT_ARG2(%esp) # &fp[AA]
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artSet64IndirectStaticFromMterp
+ call SYMBOL(artSet64IndirectStaticFromMterp)
testl %eax, %eax
jnz MterpException
REFRESH_IBASE
@@ -2895,7 +2911,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rINST, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call MterpSputObject
+ call SYMBOL(MterpSputObject)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -2916,13 +2932,13 @@ artMterpAsmInstructionStart = .L_op_nop
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movl rINST, OUT_ARG1(%esp) # fp[AA]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artSet8StaticFromCode
+ call SYMBOL(artSet8StaticFromCode)
testl %eax, %eax
jnz MterpException
REFRESH_IBASE
@@ -2944,13 +2960,13 @@ artMterpAsmInstructionStart = .L_op_nop
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movl rINST, OUT_ARG1(%esp) # fp[AA]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artSet8StaticFromCode
+ call SYMBOL(artSet8StaticFromCode)
testl %eax, %eax
jnz MterpException
REFRESH_IBASE
@@ -2972,13 +2988,13 @@ artMterpAsmInstructionStart = .L_op_nop
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movl rINST, OUT_ARG1(%esp) # fp[AA]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artSet16StaticFromCode
+ call SYMBOL(artSet16StaticFromCode)
testl %eax, %eax
jnz MterpException
REFRESH_IBASE
@@ -3000,13 +3016,13 @@ artMterpAsmInstructionStart = .L_op_nop
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movl rINST, OUT_ARG1(%esp) # fp[AA]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artSet16StaticFromCode
+ call SYMBOL(artSet16StaticFromCode)
testl %eax, %eax
jnz MterpException
REFRESH_IBASE
@@ -3032,7 +3048,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 110
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeVirtual
+ call SYMBOL(MterpInvokeVirtual)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3065,7 +3081,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 111
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeSuper
+ call SYMBOL(MterpInvokeSuper)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3098,7 +3114,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 112
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeDirect
+ call SYMBOL(MterpInvokeDirect)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3124,7 +3140,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 113
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeStatic
+ call SYMBOL(MterpInvokeStatic)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3151,7 +3167,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 114
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeInterface
+ call SYMBOL(MterpInvokeInterface)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3173,7 +3189,7 @@ artMterpAsmInstructionStart = .L_op_nop
testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
1:
xorl %eax, %eax
xorl %ecx, %ecx
@@ -3198,7 +3214,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 116
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeVirtualRange
+ call SYMBOL(MterpInvokeVirtualRange)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3224,7 +3240,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 117
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeSuperRange
+ call SYMBOL(MterpInvokeSuperRange)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3250,7 +3266,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 118
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeDirectRange
+ call SYMBOL(MterpInvokeDirectRange)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3276,7 +3292,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 119
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeStaticRange
+ call SYMBOL(MterpInvokeStaticRange)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3302,7 +3318,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 120
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeInterfaceRange
+ call SYMBOL(MterpInvokeInterfaceRange)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -3343,10 +3359,10 @@ artMterpAsmInstructionStart = .L_op_nop
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx <- A+
sarl $4,rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf,%cl # ecx <- A
negl %eax
- SET_VREG %eax %ecx
+ SET_VREG %eax, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -3362,10 +3378,10 @@ artMterpAsmInstructionStart = .L_op_nop
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx <- A+
sarl $4,rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf,%cl # ecx <- A
notl %eax
- SET_VREG %eax %ecx
+ SET_VREG %eax, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -3377,13 +3393,13 @@ artMterpAsmInstructionStart = .L_op_nop
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx %ecx # ecx <- v[B+1]
+ GET_VREG %eax, %ecx # eax <- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
negl %eax
adcl $0, %ecx
negl %ecx
- SET_VREG %eax rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx rINST # v[A+1] <- ecx
+ SET_VREG %eax, rINST # v[A+0] <- eax
+ SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -3395,12 +3411,12 @@ artMterpAsmInstructionStart = .L_op_nop
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx %ecx # ecx <- v[B+1]
+ GET_VREG %eax, %ecx # eax <- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
notl %eax
notl %ecx
- SET_VREG %eax rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx rINST # v[A+1] <- ecx
+ SET_VREG %eax, rINST # v[A+0] <- eax
+ SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
/* ------------------------------ */
@@ -3456,12 +3472,12 @@ artMterpAsmInstructionStart = .L_op_nop
/* int to long vA, vB */
movzbl rINSTbl, %eax # eax <- +A
sarl $4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
andb $0xf, rINSTbl # rINST <- A
movl rIBASE, %ecx # cltd trashes rIBASE/edx
cltd # rINST:eax<- sssssssBBBBBBBB
- SET_VREG_HIGH rIBASE rINST # v[A+1] <- rIBASE
- SET_VREG %eax rINST # v[A+0] <- %eax
+ SET_VREG_HIGH rIBASE, rINST # v[A+1] <- rIBASE
+ SET_VREG %eax, rINST # v[A+0] <- %eax
movl %ecx, rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -3523,11 +3539,11 @@ artMterpAsmInstructionStart = .L_op_nop
movzbl rINSTbl, %eax # eax <- BA
andb $0xf, %al # eax <- A
shrl $4, rINST # rINST <- B
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
.if 0
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -3904,10 +3920,10 @@ artMterpAsmInstructionStart = .L_op_nop
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx <- A+
sarl $4,rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf,%cl # ecx <- A
movsbl %al, %eax
- SET_VREG %eax %ecx
+ SET_VREG %eax, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -3923,10 +3939,10 @@ artMterpAsmInstructionStart = .L_op_nop
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx <- A+
sarl $4,rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf,%cl # ecx <- A
movzwl %ax,%eax
- SET_VREG %eax %ecx
+ SET_VREG %eax, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -3942,10 +3958,10 @@ artMterpAsmInstructionStart = .L_op_nop
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx <- A+
sarl $4,rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf,%cl # ecx <- A
movswl %ax, %eax
- SET_VREG %eax %ecx
+ SET_VREG %eax, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -3966,9 +3982,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
addl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -3989,9 +4005,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
subl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4005,11 +4021,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* mul vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
mov rIBASE, LOCAL0(%esp)
imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
mov LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -4024,8 +4040,8 @@ artMterpAsmInstructionStart = .L_op_nop
/* div/rem vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
- GET_VREG %ecx %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- vBB
+ GET_VREG %ecx, %ecx # ecx <- vCC
mov rIBASE, LOCAL0(%esp)
testl %ecx, %ecx
je common_errDivideByZero
@@ -4061,7 +4077,7 @@ artMterpAsmInstructionStart = .L_op_nop
xorl %edx, %edx # Clear %edx before divide
div %cx
.Lop_div_int_finish:
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4078,8 +4094,8 @@ artMterpAsmInstructionStart = .L_op_nop
/* div/rem vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
- GET_VREG %ecx %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- vBB
+ GET_VREG %ecx, %ecx # ecx <- vCC
mov rIBASE, LOCAL0(%esp)
testl %ecx, %ecx
je common_errDivideByZero
@@ -4115,7 +4131,7 @@ artMterpAsmInstructionStart = .L_op_nop
xorl %edx, %edx # Clear %edx before divide
div %cx
.Lop_rem_int_finish:
- SET_VREG rIBASE rINST
+ SET_VREG rIBASE, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4137,9 +4153,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
andl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4160,9 +4176,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
orl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4183,9 +4199,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
xorl (rFP,%ecx,4), %eax # ex: addl (rFP,%ecx,4),%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4201,10 +4217,10 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax <- BB
movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4220,10 +4236,10 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax <- BB
movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4239,10 +4255,10 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax <- BB
movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4255,16 +4271,16 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- movl rIBASE,LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax %eax # eax <- v[BB+1]
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp) # save rIBASE
+ GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
addl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
adcl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp),rIBASE # restore rIBASE
- SET_VREG_HIGH %eax rINST # v[AA+1] <- eax
+ SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
+ movl LOCAL0(%esp), rIBASE # restore rIBASE
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4277,16 +4293,16 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- movl rIBASE,LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax %eax # eax <- v[BB+1]
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp) # save rIBASE
+ GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
subl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
sbbl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp),rIBASE # restore rIBASE
- SET_VREG_HIGH %eax rINST # v[AA+1] <- eax
+ SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
+ movl LOCAL0(%esp), rIBASE # restore rIBASE
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4323,9 +4339,9 @@ artMterpAsmInstructionStart = .L_op_nop
mov LOCAL0(%esp), rPC # restore Interpreter PC
mov LOCAL1(%esp), rFP # restore FP
leal (%ecx,rIBASE), rIBASE # full result now in rIBASE:%eax
- SET_VREG_HIGH rIBASE rINST # v[B+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[B+1] <- rIBASE
mov LOCAL2(%esp), rIBASE # restore IBASE
- SET_VREG %eax rINST # v[B] <- eax
+ SET_VREG %eax, rINST # v[B] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -4340,18 +4356,18 @@ artMterpAsmInstructionStart = .L_op_nop
mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
mov rINST, LOCAL1(%esp) # save rINST/%ebx
movzbl 3(rPC), %eax # eax <- CC
- GET_VREG %ecx %eax
- GET_VREG_HIGH %ebx %eax
+ GET_VREG %ecx, %eax
+ GET_VREG_HIGH %ebx, %eax
movl %ecx, %edx
orl %ebx, %ecx
jz common_errDivideByZero
movzbl 2(rPC), %eax # eax <- BB
- GET_VREG_HIGH %ecx %eax
- GET_VREG %eax %eax
- call art_quick_ldiv
+ GET_VREG_HIGH %ecx, %eax
+ GET_VREG %eax, %eax
+ call SYMBOL(art_quick_ldiv)
mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE rINST
- SET_VREG %eax rINST
+ SET_VREG_HIGH rIBASE, rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4368,18 +4384,18 @@ artMterpAsmInstructionStart = .L_op_nop
mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
mov rINST, LOCAL1(%esp) # save rINST/%ebx
movzbl 3(rPC), %eax # eax <- CC
- GET_VREG %ecx %eax
- GET_VREG_HIGH %ebx %eax
+ GET_VREG %ecx, %eax
+ GET_VREG_HIGH %ebx, %eax
movl %ecx, %edx
orl %ebx, %ecx
jz common_errDivideByZero
movzbl 2(rPC), %eax # eax <- BB
- GET_VREG_HIGH %ecx %eax
- GET_VREG %eax %eax
- call art_quick_lmod
+ GET_VREG_HIGH %ecx, %eax
+ GET_VREG %eax, %eax
+ call SYMBOL(art_quick_lmod)
mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE rINST
- SET_VREG %eax rINST
+ SET_VREG_HIGH rIBASE, rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4393,16 +4409,16 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- movl rIBASE,LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax %eax # eax <- v[BB+1]
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp) # save rIBASE
+ GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
andl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
andl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp),rIBASE # restore rIBASE
- SET_VREG_HIGH %eax rINST # v[AA+1] <- eax
+ SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
+ movl LOCAL0(%esp), rIBASE # restore rIBASE
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4415,16 +4431,16 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- movl rIBASE,LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax %eax # eax <- v[BB+1]
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp) # save rIBASE
+ GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
orl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
orl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp),rIBASE # restore rIBASE
- SET_VREG_HIGH %eax rINST # v[AA+1] <- eax
+ SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
+ movl LOCAL0(%esp), rIBASE # restore rIBASE
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4437,16 +4453,16 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- movl rIBASE,LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax %eax # eax <- v[BB+1]
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp) # save rIBASE
+ GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
xorl (rFP,%ecx,4), rIBASE # ex: addl (rFP,%ecx,4),rIBASE
xorl 4(rFP,%ecx,4), %eax # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp),rIBASE # restore rIBASE
- SET_VREG_HIGH %eax rINST # v[AA+1] <- eax
+ SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
+ movl LOCAL0(%esp), rIBASE # restore rIBASE
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -4469,9 +4485,9 @@ artMterpAsmInstructionStart = .L_op_nop
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE %eax # ecx <- v[BB+1]
- GET_VREG %ecx %ecx # ecx <- vCC
- GET_VREG %eax %eax # eax <- v[BB+0]
+ GET_VREG_HIGH rIBASE, %eax # ecx <- v[BB+1]
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- v[BB+0]
shldl %eax,rIBASE
sall %cl, %eax
testb $32, %cl
@@ -4479,9 +4495,9 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, rIBASE
xorl %eax, %eax
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- %eax
+ SET_VREG %eax, rINST # v[AA+0] <- %eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -4503,9 +4519,9 @@ artMterpAsmInstructionStart = .L_op_nop
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE %eax # rIBASE<- v[BB+1]
- GET_VREG %ecx %ecx # ecx <- vCC
- GET_VREG %eax %eax # eax <- v[BB+0]
+ GET_VREG_HIGH rIBASE, %eax # rIBASE<- v[BB+1]
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- v[BB+0]
shrdl rIBASE, %eax
sarl %cl, rIBASE
testb $32, %cl
@@ -4513,9 +4529,9 @@ artMterpAsmInstructionStart = .L_op_nop
movl rIBASE, %eax
sarl $31, rIBASE
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -4537,9 +4553,9 @@ artMterpAsmInstructionStart = .L_op_nop
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE %eax # rIBASE <- v[BB+1]
- GET_VREG %ecx %ecx # ecx <- vCC
- GET_VREG %eax %eax # eax <- v[BB+0]
+ GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- v[BB+0]
shrdl rIBASE, %eax
shrl %cl, rIBASE
testb $32, %cl
@@ -4547,9 +4563,9 @@ artMterpAsmInstructionStart = .L_op_nop
movl rIBASE, %eax
xorl rIBASE, rIBASE
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[BB+0] <- eax
+ SET_VREG %eax, rINST # v[BB+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -4728,7 +4744,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/2addr vA, vB */
movzx rINSTbl, %ecx # ecx <- A+
sarl $4, rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf, %cl # ecx <- A
addl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
CLEAR_REF %ecx
@@ -4753,7 +4769,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/2addr vA, vB */
movzx rINSTbl, %ecx # ecx <- A+
sarl $4, rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf, %cl # ecx <- A
subl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
CLEAR_REF %ecx
@@ -4767,12 +4783,12 @@ artMterpAsmInstructionStart = .L_op_nop
/* mul vA, vB */
movzx rINSTbl, %ecx # ecx <- A+
sarl $4, rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf, %cl # ecx <- A
mov rIBASE, LOCAL0(%esp)
imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
mov LOCAL0(%esp), rIBASE
- SET_VREG %eax %ecx
+ SET_VREG %eax, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
/* ------------------------------ */
@@ -4788,9 +4804,9 @@ artMterpAsmInstructionStart = .L_op_nop
movzx rINSTbl, %ecx # eax <- BA
mov rIBASE, LOCAL0(%esp)
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- vBB
+ GET_VREG %eax, rINST # eax <- vBB
testl %ecx, %ecx
je common_errDivideByZero
cmpl $-1, %ecx
@@ -4798,14 +4814,14 @@ artMterpAsmInstructionStart = .L_op_nop
cmpl $0x80000000, %eax
jne .Lop_div_int_2addr_continue_div2addr
movl $0x80000000, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
.Lop_div_int_2addr_continue_div2addr:
cltd
idivl %ecx
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -4823,9 +4839,9 @@ artMterpAsmInstructionStart = .L_op_nop
movzx rINSTbl, %ecx # eax <- BA
mov rIBASE, LOCAL0(%esp)
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- vBB
+ GET_VREG %eax, rINST # eax <- vBB
testl %ecx, %ecx
je common_errDivideByZero
cmpl $-1, %ecx
@@ -4833,14 +4849,14 @@ artMterpAsmInstructionStart = .L_op_nop
cmpl $0x80000000, %eax
jne .Lop_rem_int_2addr_continue_div2addr
movl $0, rIBASE
- SET_VREG rIBASE rINST
+ SET_VREG rIBASE, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
.Lop_rem_int_2addr_continue_div2addr:
cltd
idivl %ecx
- SET_VREG rIBASE rINST
+ SET_VREG rIBASE, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -4863,7 +4879,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/2addr vA, vB */
movzx rINSTbl, %ecx # ecx <- A+
sarl $4, rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf, %cl # ecx <- A
andl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
CLEAR_REF %ecx
@@ -4888,7 +4904,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/2addr vA, vB */
movzx rINSTbl, %ecx # ecx <- A+
sarl $4, rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf, %cl # ecx <- A
orl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
CLEAR_REF %ecx
@@ -4913,7 +4929,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/2addr vA, vB */
movzx rINSTbl, %ecx # ecx <- A+
sarl $4, rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $0xf, %cl # ecx <- A
xorl %eax, (rFP,%ecx,4) # for ex: addl %eax,(rFP,%ecx,4)
CLEAR_REF %ecx
@@ -4931,11 +4947,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* shift/2addr vA, vB */
movzx rINSTbl, %ecx # eax <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
sall %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -4950,11 +4966,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* shift/2addr vA, vB */
movzx rINSTbl, %ecx # eax <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
sarl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -4969,11 +4985,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* shift/2addr vA, vB */
movzx rINSTbl, %ecx # eax <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
shrl %cl, %eax # ex: sarl %cl, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -4986,11 +5002,11 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop/2addr vA, vB */
- movzbl rINSTbl,%ecx # ecx<- BA
- sarl $4,%ecx # ecx<- B
- GET_VREG %eax %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx %ecx # eax<- v[B+1]
- andb $0xF,rINSTbl # rINST<- A
+ movzbl rINSTbl, %ecx # ecx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG %eax, %ecx # eax<- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
+ andb $0xF, rINSTbl # rINST<- A
addl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
adcl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
CLEAR_WIDE_REF rINST
@@ -5006,11 +5022,11 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop/2addr vA, vB */
- movzbl rINSTbl,%ecx # ecx<- BA
- sarl $4,%ecx # ecx<- B
- GET_VREG %eax %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx %ecx # eax<- v[B+1]
- andb $0xF,rINSTbl # rINST<- A
+ movzbl rINSTbl, %ecx # ecx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG %eax, %ecx # eax<- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
+ andb $0xF, rINSTbl # rINST<- A
subl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
sbbl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
CLEAR_WIDE_REF rINST
@@ -5072,17 +5088,17 @@ artMterpAsmInstructionStart = .L_op_nop
andb $0xf, rINSTbl # rINST <- A
mov rINST, LOCAL1(%esp) # save rINST/%ebx
movl %ebx, %ecx
- GET_VREG %edx %eax
- GET_VREG_HIGH %ebx %eax
+ GET_VREG %edx, %eax
+ GET_VREG_HIGH %ebx, %eax
movl %edx, %eax
orl %ebx, %eax
jz common_errDivideByZero
- GET_VREG %eax %ecx
- GET_VREG_HIGH %ecx %ecx
- call art_quick_ldiv
+ GET_VREG %eax, %ecx
+ GET_VREG_HIGH %ecx, %ecx
+ call SYMBOL(art_quick_ldiv)
mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE rINST
- SET_VREG %eax rINST
+ SET_VREG_HIGH rIBASE, rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -5102,17 +5118,17 @@ artMterpAsmInstructionStart = .L_op_nop
andb $0xf, rINSTbl # rINST <- A
mov rINST, LOCAL1(%esp) # save rINST/%ebx
movl %ebx, %ecx
- GET_VREG %edx %eax
- GET_VREG_HIGH %ebx %eax
+ GET_VREG %edx, %eax
+ GET_VREG_HIGH %ebx, %eax
movl %edx, %eax
orl %ebx, %eax
jz common_errDivideByZero
- GET_VREG %eax %ecx
- GET_VREG_HIGH %ecx %ecx
- call art_quick_lmod
+ GET_VREG %eax, %ecx
+ GET_VREG_HIGH %ecx, %ecx
+ call SYMBOL(art_quick_lmod)
mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE rINST
- SET_VREG %eax rINST
+ SET_VREG_HIGH rIBASE, rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -5126,11 +5142,11 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop/2addr vA, vB */
- movzbl rINSTbl,%ecx # ecx<- BA
- sarl $4,%ecx # ecx<- B
- GET_VREG %eax %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx %ecx # eax<- v[B+1]
- andb $0xF,rINSTbl # rINST<- A
+ movzbl rINSTbl, %ecx # ecx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG %eax, %ecx # eax<- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
+ andb $0xF, rINSTbl # rINST<- A
andl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
andl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
CLEAR_WIDE_REF rINST
@@ -5146,11 +5162,11 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop/2addr vA, vB */
- movzbl rINSTbl,%ecx # ecx<- BA
- sarl $4,%ecx # ecx<- B
- GET_VREG %eax %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx %ecx # eax<- v[B+1]
- andb $0xF,rINSTbl # rINST<- A
+ movzbl rINSTbl, %ecx # ecx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG %eax, %ecx # eax<- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
+ andb $0xF, rINSTbl # rINST<- A
orl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
orl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
CLEAR_WIDE_REF rINST
@@ -5166,11 +5182,11 @@ artMterpAsmInstructionStart = .L_op_nop
* Generic 64-bit binary operation.
*/
/* binop/2addr vA, vB */
- movzbl rINSTbl,%ecx # ecx<- BA
- sarl $4,%ecx # ecx<- B
- GET_VREG %eax %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx %ecx # eax<- v[B+1]
- andb $0xF,rINSTbl # rINST<- A
+ movzbl rINSTbl, %ecx # ecx<- BA
+ sarl $4, %ecx # ecx<- B
+ GET_VREG %eax, %ecx # eax<- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
+ andb $0xF, rINSTbl # rINST<- A
xorl %eax, (rFP,rINST,4) # ex: addl %eax,(rFP,rINST,4)
xorl %ecx, 4(rFP,rINST,4) # ex: adcl %ecx,4(rFP,rINST,4)
CLEAR_WIDE_REF rINST
@@ -5191,11 +5207,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* rINSTw gets AA */
movzbl rINSTbl, %ecx # ecx <- BA
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- v[AA+0]
+ GET_VREG %eax, rINST # eax <- v[AA+0]
sarl $4, %ecx # ecx <- B
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx %ecx # ecx <- vBB
+ GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
+ GET_VREG %ecx, %ecx # ecx <- vBB
shldl %eax, rIBASE
sall %cl, %eax
testb $32, %cl
@@ -5203,9 +5219,9 @@ artMterpAsmInstructionStart = .L_op_nop
movl %eax, rIBASE
xorl %eax, %eax
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
/* ------------------------------ */
@@ -5222,11 +5238,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* rINSTw gets AA */
movzbl rINSTbl, %ecx # ecx <- BA
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- v[AA+0]
+ GET_VREG %eax, rINST # eax <- v[AA+0]
sarl $4, %ecx # ecx <- B
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx %ecx # ecx <- vBB
+ GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
+ GET_VREG %ecx, %ecx # ecx <- vBB
shrdl rIBASE, %eax
sarl %cl, rIBASE
testb $32, %cl
@@ -5234,9 +5250,9 @@ artMterpAsmInstructionStart = .L_op_nop
movl rIBASE, %eax
sarl $31, rIBASE
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
/* ------------------------------ */
@@ -5253,11 +5269,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* rINSTw gets AA */
movzbl rINSTbl, %ecx # ecx <- BA
andb $0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- v[AA+0]
+ GET_VREG %eax, rINST # eax <- v[AA+0]
sarl $4, %ecx # ecx <- B
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx %ecx # ecx <- vBB
+ GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
+ GET_VREG %ecx, %ecx # ecx <- vBB
shrdl rIBASE, %eax
shrl %cl, rIBASE
testb $32, %cl
@@ -5265,9 +5281,9 @@ artMterpAsmInstructionStart = .L_op_nop
movl rIBASE, %eax
xorl rIBASE, rIBASE
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
/* ------------------------------ */
@@ -5455,11 +5471,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit16 vA, vB, #+CCCC */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $0xf, rINSTbl # rINST <- A
addl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5481,11 +5497,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit16 vA, vB, #+CCCC */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $0xf, rINSTbl # rINST <- A
subl %eax, %ecx # for example: addl %ecx, %eax
- SET_VREG %ecx rINST
+ SET_VREG %ecx, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5497,13 +5513,13 @@ artMterpAsmInstructionStart = .L_op_nop
/* Need A in rINST, ssssCCCC in ecx, vB in eax */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $0xf, rINSTbl # rINST <- A
mov rIBASE, LOCAL0(%esp)
imull %ecx, %eax # trashes rIBASE/edx
mov LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -5519,7 +5535,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* Need A in rINST, ssssCCCC in ecx, vB in eax */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $0xf, rINSTbl # rINST <- A
testl %ecx, %ecx
@@ -5529,14 +5545,14 @@ artMterpAsmInstructionStart = .L_op_nop
cmpl $0x80000000, %eax
jne .Lop_div_int_lit16_continue_div
movl $0x80000000, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.Lop_div_int_lit16_continue_div:
mov rIBASE, LOCAL0(%esp)
cltd
idivl %ecx
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5554,7 +5570,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* Need A in rINST, ssssCCCC in ecx, vB in eax */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $0xf, rINSTbl # rINST <- A
testl %ecx, %ecx
@@ -5564,14 +5580,14 @@ artMterpAsmInstructionStart = .L_op_nop
cmpl $0x80000000, %eax
jne .Lop_rem_int_lit16_continue_div
movl $0, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.Lop_rem_int_lit16_continue_div:
mov rIBASE, LOCAL0(%esp)
cltd
idivl %ecx
- SET_VREG rIBASE rINST
+ SET_VREG rIBASE, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5593,11 +5609,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit16 vA, vB, #+CCCC */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $0xf, rINSTbl # rINST <- A
andl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5618,11 +5634,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit16 vA, vB, #+CCCC */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $0xf, rINSTbl # rINST <- A
orl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5643,11 +5659,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit16 vA, vB, #+CCCC */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $0xf, rINSTbl # rINST <- A
xorl %ecx, %eax # for example: addl %ecx, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5669,9 +5685,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
addl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5693,9 +5709,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
subl %eax, %ecx # ex: addl %ecx,%eax
- SET_VREG %ecx rINST
+ SET_VREG %ecx, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5706,11 +5722,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* mul/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
mov rIBASE, LOCAL0(%esp)
imull %ecx, %eax # trashes rIBASE/edx
mov LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -5725,7 +5741,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* div/rem/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
testl %ecx, %ecx
je common_errDivideByZero
cmpl $0x80000000, %eax
@@ -5733,14 +5749,14 @@ artMterpAsmInstructionStart = .L_op_nop
cmpl $-1, %ecx
jne .Lop_div_int_lit8_continue_div
movl $0x80000000, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.Lop_div_int_lit8_continue_div:
mov rIBASE, LOCAL0(%esp)
cltd
idivl %ecx
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5757,7 +5773,7 @@ artMterpAsmInstructionStart = .L_op_nop
/* div/rem/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
testl %ecx, %ecx
je common_errDivideByZero
cmpl $0x80000000, %eax
@@ -5765,14 +5781,14 @@ artMterpAsmInstructionStart = .L_op_nop
cmpl $-1, %ecx
jne .Lop_rem_int_lit8_continue_div
movl $0, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.Lop_rem_int_lit8_continue_div:
mov rIBASE, LOCAL0(%esp)
cltd
idivl %ecx
- SET_VREG rIBASE rINST
+ SET_VREG rIBASE, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5795,9 +5811,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
andl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5819,9 +5835,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
orl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5843,9 +5859,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
xorl %ecx, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5867,9 +5883,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
sall %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5891,9 +5907,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
sarl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5915,9 +5931,9 @@ artMterpAsmInstructionStart = .L_op_nop
/* binop/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
shrl %cl, %eax # ex: addl %ecx,%eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -5929,13 +5945,13 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
testl %ecx, %ecx # is object null?
je common_errNullObject
movl (%ecx,%eax,1), %eax
andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -5945,13 +5961,13 @@ artMterpAsmInstructionStart = .L_op_nop
/* iget-wide-quick vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
testl %ecx, %ecx # is object null?
je common_errNullObject
movq (%ecx,%eax,1), %xmm0
andb $0xf, rINSTbl # rINST <- A
- SET_WIDE_FP_VREG %xmm0 rINST
+ SET_WIDE_FP_VREG %xmm0, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -5962,18 +5978,18 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
movl %ecx, OUT_ARG0(%esp)
movl %eax, OUT_ARG1(%esp)
EXPORT_PC
- call artIGetObjectFromMterp # (obj, offset)
+ call SYMBOL(artIGetObjectFromMterp) # (obj, offset)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $0xf,rINSTbl # rINST <- A
- SET_VREG_OBJECT %eax rINST # fp[A] <- value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
/* ------------------------------ */
@@ -5984,11 +6000,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
testl %ecx, %ecx # is object null?
je common_errNullObject
andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST rINST # rINST <- v[A]
+ GET_VREG rINST, rINST # rINST <- v[A]
movzwl 2(rPC), %eax # eax <- field byte offset
movl rINST, (%ecx,%eax,1)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6000,13 +6016,13 @@ artMterpAsmInstructionStart = .L_op_nop
/* iput-wide-quick vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx<- BA
sarl $4, %ecx # ecx<- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
testl %ecx, %ecx # is object null?
je common_errNullObject
movzwl 2(rPC), %eax # eax<- field byte offset
leal (%ecx,%eax,1), %ecx # ecx<- Address of 64-bit target
andb $0xf, rINSTbl # rINST<- A
- GET_WIDE_FP_VREG %xmm0 rINST # xmm0<- fp[A]/fp[A+1]
+ GET_WIDE_FP_VREG %xmm0, rINST # xmm0<- fp[A]/fp[A+1]
movq %xmm0, (%ecx) # obj.field<- r0/r1
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6020,7 +6036,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG1(%esp)
REFRESH_INST 232
movl rINST, OUT_ARG2(%esp)
- call MterpIputObjectQuick
+ call SYMBOL(MterpIputObjectQuick)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -6045,7 +6061,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 233
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeVirtualQuick
+ call SYMBOL(MterpInvokeVirtualQuick)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -6071,7 +6087,7 @@ artMterpAsmInstructionStart = .L_op_nop
movl rPC, OUT_ARG2(%esp)
REFRESH_INST 234
movl rINST, OUT_ARG3(%esp)
- call MterpInvokeVirtualQuickRange
+ call SYMBOL(MterpInvokeVirtualQuickRange)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
@@ -6087,11 +6103,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
testl %ecx, %ecx # is object null?
je common_errNullObject
andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST rINST # rINST <- v[A]
+ GET_VREG rINST, rINST # rINST <- v[A]
movzwl 2(rPC), %eax # eax <- field byte offset
movb rINSTbl, (%ecx,%eax,1)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6106,11 +6122,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
testl %ecx, %ecx # is object null?
je common_errNullObject
andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST rINST # rINST <- v[A]
+ GET_VREG rINST, rINST # rINST <- v[A]
movzwl 2(rPC), %eax # eax <- field byte offset
movb rINSTbl, (%ecx,%eax,1)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6125,11 +6141,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
testl %ecx, %ecx # is object null?
je common_errNullObject
andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST rINST # rINST <- v[A]
+ GET_VREG rINST, rINST # rINST <- v[A]
movzwl 2(rPC), %eax # eax <- field byte offset
movw rINSTw, (%ecx,%eax,1)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6144,11 +6160,11 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
testl %ecx, %ecx # is object null?
je common_errNullObject
andb $0xf, rINSTbl # rINST <- A
- GET_VREG rINST rINST # rINST <- v[A]
+ GET_VREG rINST, rINST # rINST <- v[A]
movzwl 2(rPC), %eax # eax <- field byte offset
movw rINSTw, (%ecx,%eax,1)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6163,13 +6179,13 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
testl %ecx, %ecx # is object null?
je common_errNullObject
movsbl (%ecx,%eax,1), %eax
andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6182,13 +6198,13 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
testl %ecx, %ecx # is object null?
je common_errNullObject
movsbl (%ecx,%eax,1), %eax
andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6201,13 +6217,13 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
testl %ecx, %ecx # is object null?
je common_errNullObject
movzwl (%ecx,%eax,1), %eax
andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6220,13 +6236,13 @@ artMterpAsmInstructionStart = .L_op_nop
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
testl %ecx, %ecx # is object null?
je common_errNullObject
movswl (%ecx,%eax,1), %eax
andb $0xf,rINSTbl # rINST <- A
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -6350,31 +6366,31 @@ artMterpAsmInstructionStart = .L_op_nop
.balign 128
- .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart
- .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
+ SIZE(SYMBOL(artMterpAsmInstructionStart),SYMBOL(artMterpAsmInstructionStart))
+ .global SYMBOL(artMterpAsmInstructionEnd)
+SYMBOL(artMterpAsmInstructionEnd):
/*
* ===========================================================================
* Sister implementations
* ===========================================================================
*/
- .global artMterpAsmSisterStart
- .type artMterpAsmSisterStart, %function
+ .global SYMBOL(artMterpAsmSisterStart)
+ FUNCTION_TYPE(SYMBOL(artMterpAsmSisterStart))
.text
.balign 4
-artMterpAsmSisterStart:
+SYMBOL(artMterpAsmSisterStart):
- .size artMterpAsmSisterStart, .-artMterpAsmSisterStart
- .global artMterpAsmSisterEnd
-artMterpAsmSisterEnd:
+ SIZE(SYMBOL(artMterpAsmSisterStart),SYMBOL(artMterpAsmSisterStart))
+ .global SYMBOL(artMterpAsmSisterEnd)
+SYMBOL(artMterpAsmSisterEnd):
- .global artMterpAsmAltInstructionStart
- .type artMterpAsmAltInstructionStart, %function
+ .global SYMBOL(artMterpAsmAltInstructionStart)
+ FUNCTION_TYPE(SYMBOL(artMterpAsmAltInstructionStart))
.text
-artMterpAsmAltInstructionStart = .L_ALT_op_nop
+SYMBOL(artMterpAsmAltInstructionStart) = .L_ALT_op_nop
/* ------------------------------ */
.balign 128
.L_ALT_op_nop: /* 0x00 */
@@ -6396,7 +6412,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(0*128)
@@ -6421,7 +6437,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(1*128)
@@ -6446,7 +6462,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(2*128)
@@ -6471,7 +6487,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(3*128)
@@ -6496,7 +6512,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(4*128)
@@ -6521,7 +6537,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(5*128)
@@ -6546,7 +6562,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(6*128)
@@ -6571,7 +6587,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(7*128)
@@ -6596,7 +6612,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(8*128)
@@ -6621,7 +6637,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(9*128)
@@ -6646,7 +6662,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(10*128)
@@ -6671,7 +6687,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(11*128)
@@ -6696,7 +6712,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(12*128)
@@ -6721,7 +6737,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(13*128)
@@ -6746,7 +6762,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(14*128)
@@ -6771,7 +6787,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(15*128)
@@ -6796,7 +6812,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(16*128)
@@ -6821,7 +6837,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(17*128)
@@ -6846,7 +6862,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(18*128)
@@ -6871,7 +6887,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(19*128)
@@ -6896,7 +6912,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(20*128)
@@ -6921,7 +6937,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(21*128)
@@ -6946,7 +6962,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(22*128)
@@ -6971,7 +6987,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(23*128)
@@ -6996,7 +7012,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(24*128)
@@ -7021,7 +7037,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(25*128)
@@ -7046,7 +7062,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(26*128)
@@ -7071,7 +7087,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(27*128)
@@ -7096,7 +7112,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(28*128)
@@ -7121,7 +7137,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(29*128)
@@ -7146,7 +7162,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(30*128)
@@ -7171,7 +7187,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(31*128)
@@ -7196,7 +7212,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(32*128)
@@ -7221,7 +7237,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(33*128)
@@ -7246,7 +7262,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(34*128)
@@ -7271,7 +7287,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(35*128)
@@ -7296,7 +7312,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(36*128)
@@ -7321,7 +7337,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(37*128)
@@ -7346,7 +7362,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(38*128)
@@ -7371,7 +7387,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(39*128)
@@ -7396,7 +7412,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(40*128)
@@ -7421,7 +7437,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(41*128)
@@ -7446,7 +7462,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(42*128)
@@ -7471,7 +7487,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(43*128)
@@ -7496,7 +7512,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(44*128)
@@ -7521,7 +7537,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(45*128)
@@ -7546,7 +7562,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(46*128)
@@ -7571,7 +7587,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(47*128)
@@ -7596,7 +7612,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(48*128)
@@ -7621,7 +7637,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(49*128)
@@ -7646,7 +7662,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(50*128)
@@ -7671,7 +7687,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(51*128)
@@ -7696,7 +7712,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(52*128)
@@ -7721,7 +7737,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(53*128)
@@ -7746,7 +7762,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(54*128)
@@ -7771,7 +7787,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(55*128)
@@ -7796,7 +7812,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(56*128)
@@ -7821,7 +7837,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(57*128)
@@ -7846,7 +7862,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(58*128)
@@ -7871,7 +7887,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(59*128)
@@ -7896,7 +7912,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(60*128)
@@ -7921,7 +7937,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(61*128)
@@ -7946,7 +7962,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(62*128)
@@ -7971,7 +7987,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(63*128)
@@ -7996,7 +8012,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(64*128)
@@ -8021,7 +8037,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(65*128)
@@ -8046,7 +8062,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(66*128)
@@ -8071,7 +8087,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(67*128)
@@ -8096,7 +8112,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(68*128)
@@ -8121,7 +8137,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(69*128)
@@ -8146,7 +8162,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(70*128)
@@ -8171,7 +8187,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(71*128)
@@ -8196,7 +8212,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(72*128)
@@ -8221,7 +8237,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(73*128)
@@ -8246,7 +8262,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(74*128)
@@ -8271,7 +8287,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(75*128)
@@ -8296,7 +8312,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(76*128)
@@ -8321,7 +8337,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(77*128)
@@ -8346,7 +8362,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(78*128)
@@ -8371,7 +8387,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(79*128)
@@ -8396,7 +8412,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(80*128)
@@ -8421,7 +8437,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(81*128)
@@ -8446,7 +8462,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(82*128)
@@ -8471,7 +8487,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(83*128)
@@ -8496,7 +8512,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(84*128)
@@ -8521,7 +8537,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(85*128)
@@ -8546,7 +8562,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(86*128)
@@ -8571,7 +8587,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(87*128)
@@ -8596,7 +8612,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(88*128)
@@ -8621,7 +8637,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(89*128)
@@ -8646,7 +8662,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(90*128)
@@ -8671,7 +8687,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(91*128)
@@ -8696,7 +8712,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(92*128)
@@ -8721,7 +8737,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(93*128)
@@ -8746,7 +8762,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(94*128)
@@ -8771,7 +8787,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(95*128)
@@ -8796,7 +8812,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(96*128)
@@ -8821,7 +8837,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(97*128)
@@ -8846,7 +8862,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(98*128)
@@ -8871,7 +8887,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(99*128)
@@ -8896,7 +8912,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(100*128)
@@ -8921,7 +8937,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(101*128)
@@ -8946,7 +8962,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(102*128)
@@ -8971,7 +8987,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(103*128)
@@ -8996,7 +9012,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(104*128)
@@ -9021,7 +9037,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(105*128)
@@ -9046,7 +9062,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(106*128)
@@ -9071,7 +9087,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(107*128)
@@ -9096,7 +9112,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(108*128)
@@ -9121,7 +9137,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(109*128)
@@ -9146,7 +9162,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(110*128)
@@ -9171,7 +9187,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(111*128)
@@ -9196,7 +9212,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(112*128)
@@ -9221,7 +9237,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(113*128)
@@ -9246,7 +9262,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(114*128)
@@ -9271,7 +9287,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(115*128)
@@ -9296,7 +9312,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(116*128)
@@ -9321,7 +9337,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(117*128)
@@ -9346,7 +9362,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(118*128)
@@ -9371,7 +9387,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(119*128)
@@ -9396,7 +9412,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(120*128)
@@ -9421,7 +9437,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(121*128)
@@ -9446,7 +9462,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(122*128)
@@ -9471,7 +9487,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(123*128)
@@ -9496,7 +9512,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(124*128)
@@ -9521,7 +9537,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(125*128)
@@ -9546,7 +9562,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(126*128)
@@ -9571,7 +9587,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(127*128)
@@ -9596,7 +9612,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(128*128)
@@ -9621,7 +9637,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(129*128)
@@ -9646,7 +9662,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(130*128)
@@ -9671,7 +9687,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(131*128)
@@ -9696,7 +9712,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(132*128)
@@ -9721,7 +9737,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(133*128)
@@ -9746,7 +9762,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(134*128)
@@ -9771,7 +9787,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(135*128)
@@ -9796,7 +9812,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(136*128)
@@ -9821,7 +9837,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(137*128)
@@ -9846,7 +9862,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(138*128)
@@ -9871,7 +9887,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(139*128)
@@ -9896,7 +9912,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(140*128)
@@ -9921,7 +9937,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(141*128)
@@ -9946,7 +9962,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(142*128)
@@ -9971,7 +9987,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(143*128)
@@ -9996,7 +10012,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(144*128)
@@ -10021,7 +10037,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(145*128)
@@ -10046,7 +10062,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(146*128)
@@ -10071,7 +10087,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(147*128)
@@ -10096,7 +10112,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(148*128)
@@ -10121,7 +10137,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(149*128)
@@ -10146,7 +10162,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(150*128)
@@ -10171,7 +10187,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(151*128)
@@ -10196,7 +10212,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(152*128)
@@ -10221,7 +10237,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(153*128)
@@ -10246,7 +10262,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(154*128)
@@ -10271,7 +10287,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(155*128)
@@ -10296,7 +10312,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(156*128)
@@ -10321,7 +10337,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(157*128)
@@ -10346,7 +10362,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(158*128)
@@ -10371,7 +10387,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(159*128)
@@ -10396,7 +10412,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(160*128)
@@ -10421,7 +10437,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(161*128)
@@ -10446,7 +10462,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(162*128)
@@ -10471,7 +10487,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(163*128)
@@ -10496,7 +10512,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(164*128)
@@ -10521,7 +10537,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(165*128)
@@ -10546,7 +10562,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(166*128)
@@ -10571,7 +10587,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(167*128)
@@ -10596,7 +10612,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(168*128)
@@ -10621,7 +10637,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(169*128)
@@ -10646,7 +10662,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(170*128)
@@ -10671,7 +10687,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(171*128)
@@ -10696,7 +10712,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(172*128)
@@ -10721,7 +10737,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(173*128)
@@ -10746,7 +10762,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(174*128)
@@ -10771,7 +10787,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(175*128)
@@ -10796,7 +10812,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(176*128)
@@ -10821,7 +10837,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(177*128)
@@ -10846,7 +10862,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(178*128)
@@ -10871,7 +10887,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(179*128)
@@ -10896,7 +10912,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(180*128)
@@ -10921,7 +10937,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(181*128)
@@ -10946,7 +10962,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(182*128)
@@ -10971,7 +10987,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(183*128)
@@ -10996,7 +11012,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(184*128)
@@ -11021,7 +11037,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(185*128)
@@ -11046,7 +11062,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(186*128)
@@ -11071,7 +11087,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(187*128)
@@ -11096,7 +11112,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(188*128)
@@ -11121,7 +11137,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(189*128)
@@ -11146,7 +11162,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(190*128)
@@ -11171,7 +11187,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(191*128)
@@ -11196,7 +11212,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(192*128)
@@ -11221,7 +11237,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(193*128)
@@ -11246,7 +11262,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(194*128)
@@ -11271,7 +11287,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(195*128)
@@ -11296,7 +11312,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(196*128)
@@ -11321,7 +11337,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(197*128)
@@ -11346,7 +11362,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(198*128)
@@ -11371,7 +11387,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(199*128)
@@ -11396,7 +11412,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(200*128)
@@ -11421,7 +11437,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(201*128)
@@ -11446,7 +11462,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(202*128)
@@ -11471,7 +11487,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(203*128)
@@ -11496,7 +11512,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(204*128)
@@ -11521,7 +11537,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(205*128)
@@ -11546,7 +11562,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(206*128)
@@ -11571,7 +11587,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(207*128)
@@ -11596,7 +11612,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(208*128)
@@ -11621,7 +11637,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(209*128)
@@ -11646,7 +11662,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(210*128)
@@ -11671,7 +11687,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(211*128)
@@ -11696,7 +11712,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(212*128)
@@ -11721,7 +11737,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(213*128)
@@ -11746,7 +11762,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(214*128)
@@ -11771,7 +11787,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(215*128)
@@ -11796,7 +11812,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(216*128)
@@ -11821,7 +11837,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(217*128)
@@ -11846,7 +11862,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(218*128)
@@ -11871,7 +11887,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(219*128)
@@ -11896,7 +11912,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(220*128)
@@ -11921,7 +11937,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(221*128)
@@ -11946,7 +11962,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(222*128)
@@ -11971,7 +11987,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(223*128)
@@ -11996,7 +12012,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(224*128)
@@ -12021,7 +12037,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(225*128)
@@ -12046,7 +12062,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(226*128)
@@ -12071,7 +12087,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(227*128)
@@ -12096,7 +12112,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(228*128)
@@ -12121,7 +12137,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(229*128)
@@ -12146,7 +12162,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(230*128)
@@ -12171,7 +12187,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(231*128)
@@ -12196,7 +12212,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(232*128)
@@ -12221,7 +12237,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(233*128)
@@ -12246,7 +12262,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(234*128)
@@ -12271,7 +12287,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(235*128)
@@ -12296,7 +12312,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(236*128)
@@ -12321,7 +12337,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(237*128)
@@ -12346,7 +12362,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(238*128)
@@ -12371,7 +12387,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(239*128)
@@ -12396,7 +12412,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(240*128)
@@ -12421,7 +12437,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(241*128)
@@ -12446,7 +12462,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(242*128)
@@ -12471,7 +12487,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(243*128)
@@ -12496,7 +12512,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(244*128)
@@ -12521,7 +12537,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(245*128)
@@ -12546,7 +12562,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(246*128)
@@ -12571,7 +12587,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(247*128)
@@ -12596,7 +12612,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(248*128)
@@ -12621,7 +12637,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(249*128)
@@ -12646,7 +12662,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(250*128)
@@ -12671,7 +12687,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(251*128)
@@ -12696,7 +12712,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(252*128)
@@ -12721,7 +12737,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(253*128)
@@ -12746,7 +12762,7 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(254*128)
@@ -12771,14 +12787,14 @@ artMterpAsmAltInstructionStart = .L_ALT_op_nop
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(255*128)
.balign 128
- .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart
- .global artMterpAsmAltInstructionEnd
-artMterpAsmAltInstructionEnd:
+ SIZE(SYMBOL(artMterpAsmAltInstructionStart),SYMBOL(artMterpAsmAltInstructionStart))
+ .global SYMBOL(artMterpAsmAltInstructionEnd)
+SYMBOL(artMterpAsmAltInstructionEnd):
/* File: x86/footer.S */
/*
* ===========================================================================
@@ -12802,7 +12818,7 @@ common_errDivideByZero:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogDivideByZeroException
+ call SYMBOL(MterpLogDivideByZeroException)
#endif
jmp MterpCommonFallback
@@ -12813,7 +12829,7 @@ common_errArrayIndex:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogArrayIndexException
+ call SYMBOL(MterpLogArrayIndexException)
#endif
jmp MterpCommonFallback
@@ -12824,7 +12840,7 @@ common_errNegativeArraySize:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogNegativeArraySizeException
+ call SYMBOL(MterpLogNegativeArraySizeException)
#endif
jmp MterpCommonFallback
@@ -12835,7 +12851,7 @@ common_errNoSuchMethod:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogNoSuchMethodException
+ call SYMBOL(MterpLogNoSuchMethodException)
#endif
jmp MterpCommonFallback
@@ -12846,7 +12862,7 @@ common_errNullObject:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogNullObjectException
+ call SYMBOL(MterpLogNullObjectException)
#endif
jmp MterpCommonFallback
@@ -12857,7 +12873,7 @@ common_exceptionThrown:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG0(%esp)
- call MterpLogExceptionThrownException
+ call SYMBOL(MterpLogExceptionThrownException)
#endif
jmp MterpCommonFallback
@@ -12870,7 +12886,7 @@ MterpSuspendFallback:
movl %ecx, OUT_ARG0(%esp)
movl THREAD_FLAGS_OFFSET(%eax), %eax
movl %eax, OUT_ARG2(%esp)
- call MterpLogSuspendFallback
+ call SYMBOL(MterpLogSuspendFallback)
#endif
jmp MterpCommonFallback
@@ -12895,7 +12911,7 @@ MterpException:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpHandleException
+ call SYMBOL(MterpHandleException)
testl %eax, %eax
jz MterpExceptionReturn
REFRESH_IBASE
@@ -12919,7 +12935,7 @@ MterpCheckSuspendAndContinue:
testl $(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
REFRESH_IBASE
1:
GOTO_NEXT
@@ -12934,7 +12950,7 @@ MterpFallback:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogFallback
+ call SYMBOL(MterpLogFallback)
#endif
MterpCommonFallback:
xor %eax, %eax
@@ -12965,5 +12981,5 @@ MterpDone:
ret
.cfi_endproc
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
+ SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/x86/alt_stub.S b/runtime/interpreter/mterp/x86/alt_stub.S
index 6462fc5919..5a911670d2 100644
--- a/runtime/interpreter/mterp/x86/alt_stub.S
+++ b/runtime/interpreter/mterp/x86/alt_stub.S
@@ -15,6 +15,6 @@
movl %ecx, OUT_ARG0(%esp)
leal OFF_FP_SHADOWFRAME(rFP), %eax
movl %eax, OUT_ARG1(%esp)
- call MterpCheckBefore # (self, shadow_frame)
+ call SYMBOL(MterpCheckBefore) # (self, shadow_frame)
REFRESH_IBASE
jmp .L_op_nop+(${opnum}*${handler_size_bytes})
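For orientation: the long run of near-identical hunks earlier in this diff is the generated, per-opcode expansion of this single alt_stub.S template, with ${opnum} and ${handler_size_bytes} substituted for each opcode. A sketch of one expansion, using values visible in the generated hunks above (opnum 177, 128-byte stubs kept on 128-byte boundaries by .balign 128):

    movl    %ecx, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %eax
    movl    %eax, OUT_ARG1(%esp)
    call    SYMBOL(MterpCheckBefore)    # (self, shadow_frame)
    REFRESH_IBASE
    jmp     .L_op_nop+(177*128)         # back into the main handler table:
                                        # .L_op_nop + 177*128 = .L_op_nop + 22656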
diff --git a/runtime/interpreter/mterp/x86/bincmp.S b/runtime/interpreter/mterp/x86/bincmp.S
index a9a8c3ae7f..27cf6ea6d4 100644
--- a/runtime/interpreter/mterp/x86/bincmp.S
+++ b/runtime/interpreter/mterp/x86/bincmp.S
@@ -8,7 +8,7 @@
/* if-cmp vA, vB, +CCCC */
movzx rINSTbl, %ecx # ecx <- A+
andb $$0xf, %cl # ecx <- A
- GET_VREG %eax %ecx # eax <- vA
+ GET_VREG %eax, %ecx # eax <- vA
sarl $$4, rINST # rINST <- B
cmpl VREG_ADDRESS(rINST), %eax # compare (vA, vB)
movl $$2, %eax # assume not taken
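Most of the remaining hunks in this diff only add a comma between the two arguments of the GET_VREG/SET_VREG family of macros. A minimal sketch of what those macros do, copied from the header.S hunk further down; the motivation for the comma (the assembler used for Mac host builds wants comma-separated .macro arguments, while GNU as also tolerates spaces) is an inference, not stated in the diff itself:

    .macro SET_VREG _reg _vreg                       # definition from x86/header.S (see below)
        movl    \_reg, (rFP,\_vreg,4)                # store the 32-bit value into vreg _vreg
        movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)   # clear the matching reference slot
    .endm

    SET_VREG %eax rINST       # old call sites: space-separated arguments
    SET_VREG %eax, rINST      # new call sites: comma-separated arguments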
diff --git a/runtime/interpreter/mterp/x86/bindiv.S b/runtime/interpreter/mterp/x86/bindiv.S
index 742f758bc4..bb5b319c49 100644
--- a/runtime/interpreter/mterp/x86/bindiv.S
+++ b/runtime/interpreter/mterp/x86/bindiv.S
@@ -6,8 +6,8 @@
/* div/rem vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
- GET_VREG %ecx %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- vBB
+ GET_VREG %ecx, %ecx # ecx <- vCC
mov rIBASE, LOCAL0(%esp)
testl %ecx, %ecx
je common_errDivideByZero
@@ -43,6 +43,6 @@
xorl %edx, %edx # Clear %edx before divide
div %cx
.L${opcode}_finish:
- SET_VREG $result rINST
+ SET_VREG $result, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindiv2addr.S b/runtime/interpreter/mterp/x86/bindiv2addr.S
index ee7c523b0a..e620996c9f 100644
--- a/runtime/interpreter/mterp/x86/bindiv2addr.S
+++ b/runtime/interpreter/mterp/x86/bindiv2addr.S
@@ -7,9 +7,9 @@
movzx rINSTbl, %ecx # eax <- BA
mov rIBASE, LOCAL0(%esp)
sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- vBB
+ GET_VREG %eax, rINST # eax <- vBB
testl %ecx, %ecx
je common_errDivideByZero
cmpl $$-1, %ecx
@@ -17,13 +17,13 @@
cmpl $$0x80000000, %eax
jne .L${opcode}_continue_div2addr
movl $special, $result
- SET_VREG $result rINST
+ SET_VREG $result, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
.L${opcode}_continue_div2addr:
cltd
idivl %ecx
- SET_VREG $result rINST
+ SET_VREG $result, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/bindivLit16.S b/runtime/interpreter/mterp/x86/bindivLit16.S
index a2c4334cf0..be094aee49 100644
--- a/runtime/interpreter/mterp/x86/bindivLit16.S
+++ b/runtime/interpreter/mterp/x86/bindivLit16.S
@@ -7,7 +7,7 @@
/* Need A in rINST, ssssCCCC in ecx, vB in eax */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $$4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $$0xf, rINSTbl # rINST <- A
testl %ecx, %ecx
@@ -17,13 +17,13 @@
cmpl $$0x80000000, %eax
jne .L${opcode}_continue_div
movl $special, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.L${opcode}_continue_div:
mov rIBASE, LOCAL0(%esp)
cltd
idivl %ecx
- SET_VREG $result rINST
+ SET_VREG $result, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/bindivLit8.S b/runtime/interpreter/mterp/x86/bindivLit8.S
index 61bee0621c..fddb54574d 100644
--- a/runtime/interpreter/mterp/x86/bindivLit8.S
+++ b/runtime/interpreter/mterp/x86/bindivLit8.S
@@ -6,7 +6,7 @@
/* div/rem/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
testl %ecx, %ecx
je common_errDivideByZero
cmpl $$0x80000000, %eax
@@ -14,13 +14,13 @@
cmpl $$-1, %ecx
jne .L${opcode}_continue_div
movl $special, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.L${opcode}_continue_div:
mov rIBASE, LOCAL0(%esp)
cltd
idivl %ecx
- SET_VREG $result rINST
+ SET_VREG $result, rINST
mov LOCAL0(%esp), rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
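The $$0x80000000 / $$-1 checks in the div/rem templates above guard the one input that idivl cannot handle even after the divide-by-zero test: INT_MIN / -1 overflows the 32-bit quotient and raises #DE, so the handlers store the Java-defined result ($special) without dividing. A hypothetical, non-template expansion for div-int, where that special result is INT_MIN itself (divide-by-zero is assumed to have already branched to common_errDivideByZero):

    cmpl    $-1, %ecx               # divisor == -1 ?
    jne     1f
    cmpl    $0x80000000, %eax       # dividend == INT_MIN ?
    jne     1f
    movl    $0x80000000, %eax       # Java: INT_MIN / -1 == INT_MIN (the remainder case would use 0)
    jmp     2f
1:
    cltd                            # sign-extend %eax into %edx:%eax
    idivl   %ecx                    # %eax <- quotient, %edx <- remainder
2: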
diff --git a/runtime/interpreter/mterp/x86/binop.S b/runtime/interpreter/mterp/x86/binop.S
index 5383f25fa1..d895235212 100644
--- a/runtime/interpreter/mterp/x86/binop.S
+++ b/runtime/interpreter/mterp/x86/binop.S
@@ -11,7 +11,7 @@
/* binop vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
$instr # ex: addl (rFP,%ecx,4),%eax
- SET_VREG $result rINST
+ SET_VREG $result, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop1.S b/runtime/interpreter/mterp/x86/binop1.S
index cd51d0c27f..5049bb325e 100644
--- a/runtime/interpreter/mterp/x86/binop1.S
+++ b/runtime/interpreter/mterp/x86/binop1.S
@@ -6,8 +6,8 @@
/* binop vAA, vBB, vCC */
movzbl 2(rPC),%eax # eax <- BB
movzbl 3(rPC),%ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
$instr # ex: addl %ecx,%eax
- SET_VREG $result rINST
+ SET_VREG $result, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binop2addr.S b/runtime/interpreter/mterp/x86/binop2addr.S
index abee4dbd46..f126234a40 100644
--- a/runtime/interpreter/mterp/x86/binop2addr.S
+++ b/runtime/interpreter/mterp/x86/binop2addr.S
@@ -12,7 +12,7 @@
/* binop/2addr vA, vB */
movzx rINSTbl, %ecx # ecx <- A+
sarl $$4, rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $$0xf, %cl # ecx <- A
$instr # for ex: addl %eax,(rFP,%ecx,4)
CLEAR_REF %ecx
diff --git a/runtime/interpreter/mterp/x86/binopLit16.S b/runtime/interpreter/mterp/x86/binopLit16.S
index 6c7fe61e10..2fd59de936 100644
--- a/runtime/interpreter/mterp/x86/binopLit16.S
+++ b/runtime/interpreter/mterp/x86/binopLit16.S
@@ -11,9 +11,9 @@
/* binop/lit16 vA, vB, #+CCCC */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $$4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $$0xf, rINSTbl # rINST <- A
$instr # for example: addl %ecx, %eax
- SET_VREG $result rINST
+ SET_VREG $result, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopLit8.S b/runtime/interpreter/mterp/x86/binopLit8.S
index 924685df0e..67cead27d5 100644
--- a/runtime/interpreter/mterp/x86/binopLit8.S
+++ b/runtime/interpreter/mterp/x86/binopLit8.S
@@ -12,7 +12,7 @@
/* binop/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
$instr # ex: addl %ecx,%eax
- SET_VREG $result rINST
+ SET_VREG $result, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopWide.S b/runtime/interpreter/mterp/x86/binopWide.S
index 9f7106e620..da1293d5b9 100644
--- a/runtime/interpreter/mterp/x86/binopWide.S
+++ b/runtime/interpreter/mterp/x86/binopWide.S
@@ -2,14 +2,14 @@
* Generic 64-bit binary operation.
*/
/* binop vAA, vBB, vCC */
- movzbl 2(rPC),%eax # eax <- BB
- movzbl 3(rPC),%ecx # ecx <- CC
- movl rIBASE,LOCAL0(%esp) # save rIBASE
- GET_VREG rIBASE %eax # rIBASE <- v[BB+0]
- GET_VREG_HIGH %eax %eax # eax <- v[BB+1]
+ movzbl 2(rPC), %eax # eax <- BB
+ movzbl 3(rPC), %ecx # ecx <- CC
+ movl rIBASE, LOCAL0(%esp) # save rIBASE
+ GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
$instr1 # ex: addl (rFP,%ecx,4),rIBASE
$instr2 # ex: adcl 4(rFP,%ecx,4),%eax
- SET_VREG rIBASE rINST # v[AA+0] <- rIBASE
- movl LOCAL0(%esp),rIBASE # restore rIBASE
- SET_VREG_HIGH %eax rINST # v[AA+1] <- eax
+ SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
+ movl LOCAL0(%esp), rIBASE # restore rIBASE
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/binopWide2addr.S b/runtime/interpreter/mterp/x86/binopWide2addr.S
index 7560af44fe..da816f468b 100644
--- a/runtime/interpreter/mterp/x86/binopWide2addr.S
+++ b/runtime/interpreter/mterp/x86/binopWide2addr.S
@@ -2,11 +2,11 @@
* Generic 64-bit binary operation.
*/
/* binop/2addr vA, vB */
- movzbl rINSTbl,%ecx # ecx<- BA
- sarl $$4,%ecx # ecx<- B
- GET_VREG %eax %ecx # eax<- v[B+0]
- GET_VREG_HIGH %ecx %ecx # eax<- v[B+1]
- andb $$0xF,rINSTbl # rINST<- A
+ movzbl rINSTbl, %ecx # ecx<- BA
+ sarl $$4, %ecx # ecx<- B
+ GET_VREG %eax, %ecx # eax<- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
+ andb $$0xF, rINSTbl # rINST<- A
$instr1 # ex: addl %eax,(rFP,rINST,4)
$instr2 # ex: adcl %ecx,4(rFP,rINST,4)
CLEAR_WIDE_REF rINST
diff --git a/runtime/interpreter/mterp/x86/entry.S b/runtime/interpreter/mterp/x86/entry.S
index a24ef70df5..b83f7e1d83 100644
--- a/runtime/interpreter/mterp/x86/entry.S
+++ b/runtime/interpreter/mterp/x86/entry.S
@@ -18,8 +18,8 @@
*/
.text
- .global ExecuteMterpImpl
- .type ExecuteMterpImpl, %function
+ .global SYMBOL(ExecuteMterpImpl)
+ FUNCTION_TYPE(ExecuteMterpImpl)
/*
* On entry:
@@ -30,7 +30,7 @@
*
*/
-ExecuteMterpImpl:
+SYMBOL(ExecuteMterpImpl):
.cfi_startproc
/* Allocate frame */
subl $$FRAME_SIZE, %esp
diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S
index a2a36c4f64..385e78499f 100644
--- a/runtime/interpreter/mterp/x86/footer.S
+++ b/runtime/interpreter/mterp/x86/footer.S
@@ -20,7 +20,7 @@ common_errDivideByZero:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogDivideByZeroException
+ call SYMBOL(MterpLogDivideByZeroException)
#endif
jmp MterpCommonFallback
@@ -31,7 +31,7 @@ common_errArrayIndex:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogArrayIndexException
+ call SYMBOL(MterpLogArrayIndexException)
#endif
jmp MterpCommonFallback
@@ -42,7 +42,7 @@ common_errNegativeArraySize:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogNegativeArraySizeException
+ call SYMBOL(MterpLogNegativeArraySizeException)
#endif
jmp MterpCommonFallback
@@ -53,7 +53,7 @@ common_errNoSuchMethod:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogNoSuchMethodException
+ call SYMBOL(MterpLogNoSuchMethodException)
#endif
jmp MterpCommonFallback
@@ -64,7 +64,7 @@ common_errNullObject:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogNullObjectException
+ call SYMBOL(MterpLogNullObjectException)
#endif
jmp MterpCommonFallback
@@ -75,7 +75,7 @@ common_exceptionThrown:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG0(%esp)
- call MterpLogExceptionThrownException
+ call SYMBOL(MterpLogExceptionThrownException)
#endif
jmp MterpCommonFallback
@@ -88,7 +88,7 @@ MterpSuspendFallback:
movl %ecx, OUT_ARG0(%esp)
movl THREAD_FLAGS_OFFSET(%eax), %eax
movl %eax, OUT_ARG2(%esp)
- call MterpLogSuspendFallback
+ call SYMBOL(MterpLogSuspendFallback)
#endif
jmp MterpCommonFallback
@@ -113,7 +113,7 @@ MterpException:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpHandleException
+ call SYMBOL(MterpHandleException)
testl %eax, %eax
jz MterpExceptionReturn
REFRESH_IBASE
@@ -137,7 +137,7 @@ MterpCheckSuspendAndContinue:
testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
REFRESH_IBASE
1:
GOTO_NEXT
@@ -152,7 +152,7 @@ MterpFallback:
movl %eax, OUT_ARG0(%esp)
lea OFF_FP_SHADOWFRAME(rFP), %ecx
movl %ecx, OUT_ARG1(%esp)
- call MterpLogFallback
+ call SYMBOL(MterpLogFallback)
#endif
MterpCommonFallback:
xor %eax, %eax
@@ -183,4 +183,4 @@ MterpDone:
ret
.cfi_endproc
- .size ExecuteMterpImpl, .-ExecuteMterpImpl
+ SIZE(ExecuteMterpImpl,ExecuteMterpImpl)
diff --git a/runtime/interpreter/mterp/x86/fpcmp.S b/runtime/interpreter/mterp/x86/fpcmp.S
index 2b98667974..5f9eef9d09 100644
--- a/runtime/interpreter/mterp/x86/fpcmp.S
+++ b/runtime/interpreter/mterp/x86/fpcmp.S
@@ -31,5 +31,5 @@
.L${opcode}_less:
decl %eax
.L${opcode}_finish:
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/header.S b/runtime/interpreter/mterp/x86/header.S
index 24817856fa..0977b901e2 100644
--- a/runtime/interpreter/mterp/x86/header.S
+++ b/runtime/interpreter/mterp/x86/header.S
@@ -89,6 +89,22 @@ unspecified registers or condition codes.
*/
#include "asm_support.h"
+/*
+ * Handle mac compiler specific
+ */
+#if defined(__APPLE__)
+ #define MACRO_LITERAL(value) $$(value)
+ #define FUNCTION_TYPE(name)
+ #define SIZE(start,end)
+ // Mac OS' symbols have an _ prefix.
+ #define SYMBOL(name) _ ## name
+#else
+ #define MACRO_LITERAL(value) $$value
+ #define FUNCTION_TYPE(name) .type name, @function
+ #define SIZE(start,end) .size start, .-end
+ #define SYMBOL(name) name
+#endif
+
/* Frame size must be 16-byte aligned.
* Remember about 4 bytes for return address
*/
@@ -192,7 +208,7 @@ unspecified registers or condition codes.
*/
.macro REFRESH_INST _opnum
movb rINSTbl, rINSTbh
- movb $$\_opnum, rINSTbl
+ movb MACRO_LITERAL(\_opnum), rINSTbl
.endm
/*
@@ -208,7 +224,7 @@ unspecified registers or condition codes.
.macro GOTO_NEXT
movzx rINSTbl,%eax
movzbl rINSTbh,rINST
- shll $$${handler_size_bits}, %eax
+ shll MACRO_LITERAL(${handler_size_bits}), %eax
addl rIBASE, %eax
jmp *%eax
.endm
@@ -248,7 +264,7 @@ unspecified registers or condition codes.
.macro SET_VREG _reg _vreg
movl \_reg, (rFP,\_vreg,4)
- movl $$0, (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
.endm
/* Write wide value from xmm. xmm is clobbered. */
@@ -269,14 +285,14 @@ unspecified registers or condition codes.
.macro SET_VREG_HIGH _reg _vreg
movl \_reg, 4(rFP,\_vreg,4)
- movl $$0, 4(rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm
.macro CLEAR_REF _vreg
- movl $$0, (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
.endm
.macro CLEAR_WIDE_REF _vreg
- movl $$0, (rREFS,\_vreg,4)
- movl $$0, 4(rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), (rREFS,\_vreg,4)
+ movl MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm
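The #define block added to header.S above is what every SYMBOL/FUNCTION_TYPE/SIZE/MACRO_LITERAL use in this diff expands through: Mach-O toolchains prefix C symbols with an underscore and accept neither .type nor .size, while ELF toolchains need no prefix but want both directives. A sketch of the two expansions of the entry/footer pattern; the doubled $$ in the template is taken to be the mterp generator's escape for a literal $:

    # ELF (non-Apple) expansion:
    .global ExecuteMterpImpl
    .type   ExecuteMterpImpl, @function
ExecuteMterpImpl:
    ...
    .size   ExecuteMterpImpl, .-ExecuteMterpImpl

    # __APPLE__ (Mach-O) expansion:
    .global _ExecuteMterpImpl
_ExecuteMterpImpl:                      # FUNCTION_TYPE and SIZE expand to nothing here
    ...
    call    _MterpCheckBefore           # SYMBOL() supplies the leading underscore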
diff --git a/runtime/interpreter/mterp/x86/invoke.S b/runtime/interpreter/mterp/x86/invoke.S
index 80f782285a..054fbfdf69 100644
--- a/runtime/interpreter/mterp/x86/invoke.S
+++ b/runtime/interpreter/mterp/x86/invoke.S
@@ -13,7 +13,7 @@
movl rPC, OUT_ARG2(%esp)
REFRESH_INST ${opnum}
movl rINST, OUT_ARG3(%esp)
- call $helper
+ call SYMBOL($helper)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_aget.S b/runtime/interpreter/mterp/x86/op_aget.S
index 52b5236a8c..338386ff40 100644
--- a/runtime/interpreter/mterp/x86/op_aget.S
+++ b/runtime/interpreter/mterp/x86/op_aget.S
@@ -8,12 +8,12 @@
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
$load $data_offset(%eax,%ecx,$shift), %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aget_object.S b/runtime/interpreter/mterp/x86/op_aget_object.S
index 61f3e9194f..cbfb50cb09 100644
--- a/runtime/interpreter/mterp/x86/op_aget_object.S
+++ b/runtime/interpreter/mterp/x86/op_aget_object.S
@@ -6,15 +6,15 @@
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecs <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecs <- vCC (requested index)
EXPORT_PC
movl %eax, OUT_ARG0(%esp)
movl %ecx, OUT_ARG1(%esp)
- call artAGetObjectFromMterp # (array, index)
+ call SYMBOL(artAGetObjectFromMterp) # (array, index)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
- SET_VREG_OBJECT %eax rINST
+ SET_VREG_OBJECT %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
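All of the SYMBOL() call sites in these opcode handlers follow the same calling pattern: the frame set up in entry.S reserves outgoing-argument space, so C helpers are reached by storing arguments at fixed %esp offsets (OUT_ARG0, OUT_ARG1, ...) instead of pushing them, and the cdecl result comes back in %eax. A sketch based on the op_aget_object hunk above; the exact OUT_ARGn offsets are defined in a part of the header not shown in this diff and are assumed here:

    movl    %eax, OUT_ARG0(%esp)                # arg0: array object
    movl    %ecx, OUT_ARG1(%esp)                # arg1: requested index
    call    SYMBOL(artAGetObjectFromMterp)      # plain cdecl call; result in %eax
    movl    rSELF, %ecx                         # %ecx is caller-saved, reload Thread*
    cmpl    $0, THREAD_EXCEPTION_OFFSET(%ecx)   # did the helper throw?
    jnz     MterpException
    SET_VREG_OBJECT %eax, rINST                 # vAA <- returned object reference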
diff --git a/runtime/interpreter/mterp/x86/op_aget_wide.S b/runtime/interpreter/mterp/x86/op_aget_wide.S
index 663adc67f5..92c612a25c 100644
--- a/runtime/interpreter/mterp/x86/op_aget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_aget_wide.S
@@ -4,13 +4,13 @@
/* aget-wide vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
movq (%eax), %xmm0 # xmm0 <- vBB[vCC]
- SET_WIDE_FP_VREG %xmm0 rINST # vAA <- xmm0
+ SET_WIDE_FP_VREG %xmm0, rINST # vAA <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aput.S b/runtime/interpreter/mterp/x86/op_aput.S
index 2ea465df90..9d8c52d127 100644
--- a/runtime/interpreter/mterp/x86/op_aput.S
+++ b/runtime/interpreter/mterp/x86/op_aput.S
@@ -8,13 +8,13 @@
/* op vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal $data_offset(%eax,%ecx,$shift), %eax
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
$store $reg, (%eax)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_aput_object.S b/runtime/interpreter/mterp/x86/op_aput_object.S
index 2af5acb288..9cfc2213d2 100644
--- a/runtime/interpreter/mterp/x86/op_aput_object.S
+++ b/runtime/interpreter/mterp/x86/op_aput_object.S
@@ -8,7 +8,7 @@
movl rPC, OUT_ARG1(%esp)
REFRESH_INST ${opnum}
movl rINST, OUT_ARG2(%esp)
- call MterpAputObject # (array, index)
+ call SYMBOL(MterpAputObject) # (array, index)
REFRESH_IBASE
testl %eax, %eax
jz MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_aput_wide.S b/runtime/interpreter/mterp/x86/op_aput_wide.S
index 7a3337166e..43ef64a54a 100644
--- a/runtime/interpreter/mterp/x86/op_aput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_aput_wide.S
@@ -5,13 +5,13 @@
/* aput-wide vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB (array object)
- GET_VREG %ecx %ecx # ecx <- vCC (requested index)
+ GET_VREG %eax, %eax # eax <- vBB (array object)
+ GET_VREG %ecx, %ecx # ecx <- vCC (requested index)
testl %eax, %eax # null array object?
je common_errNullObject # bail if so
cmpl MIRROR_ARRAY_LENGTH_OFFSET(%eax), %ecx
jae common_errArrayIndex # index >= length, bail.
leal MIRROR_WIDE_ARRAY_DATA_OFFSET(%eax,%ecx,8), %eax
- GET_WIDE_FP_VREG %xmm0 rINST # xmm0 <- vAA
+ GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- vAA
movq %xmm0, (%eax) # vBB[vCC] <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_array_length.S b/runtime/interpreter/mterp/x86/op_array_length.S
index 3e42a7cddd..60ed80b541 100644
--- a/runtime/interpreter/mterp/x86/op_array_length.S
+++ b/runtime/interpreter/mterp/x86/op_array_length.S
@@ -3,10 +3,10 @@
*/
mov rINST, %eax # eax <- BA
sarl $$4, rINST # rINST <- B
- GET_VREG %ecx rINST # ecx <- vB (object ref)
+ GET_VREG %ecx, rINST # ecx <- vB (object ref)
testl %ecx, %ecx # is null?
je common_errNullObject
andb $$0xf, %al # eax <- A
movl MIRROR_ARRAY_LENGTH_OFFSET(%ecx), rINST
- SET_VREG rINST %eax
+ SET_VREG rINST, %eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_check_cast.S b/runtime/interpreter/mterp/x86/op_check_cast.S
index 018432a92a..ae2ff9ea21 100644
--- a/runtime/interpreter/mterp/x86/op_check_cast.S
+++ b/runtime/interpreter/mterp/x86/op_check_cast.S
@@ -11,7 +11,7 @@
movl %eax, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call MterpCheckCast # (index, &obj, method, self)
+ call SYMBOL(MterpCheckCast) # (index, &obj, method, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_cmp_long.S b/runtime/interpreter/mterp/x86/op_cmp_long.S
index bd86738950..1f729b078e 100644
--- a/runtime/interpreter/mterp/x86/op_cmp_long.S
+++ b/runtime/interpreter/mterp/x86/op_cmp_long.S
@@ -5,17 +5,17 @@
/* cmp-long vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG_HIGH %eax %eax # eax <- v[BB+1], BB is clobbered
+ GET_VREG_HIGH %eax, %eax # eax <- v[BB+1], BB is clobbered
cmpl VREG_HIGH_ADDRESS(%ecx), %eax
jl .L${opcode}_smaller
jg .L${opcode}_bigger
movzbl 2(rPC), %eax # eax <- BB, restore BB
- GET_VREG %eax %eax # eax <- v[BB]
+ GET_VREG %eax, %eax # eax <- v[BB]
sub VREG_ADDRESS(%ecx), %eax
ja .L${opcode}_bigger
jb .L${opcode}_smaller
.L${opcode}_finish:
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
.L${opcode}_bigger:
diff --git a/runtime/interpreter/mterp/x86/op_const.S b/runtime/interpreter/mterp/x86/op_const.S
index dc695307b3..544d63b22a 100644
--- a/runtime/interpreter/mterp/x86/op_const.S
+++ b/runtime/interpreter/mterp/x86/op_const.S
@@ -1,4 +1,4 @@
/* const vAA, #+BBBBbbbb */
movl 2(rPC), %eax # grab all 32 bits at once
- SET_VREG %eax rINST # vAA<- eax
+ SET_VREG %eax, rINST # vAA<- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_16.S b/runtime/interpreter/mterp/x86/op_const_16.S
index f5707cf22d..97cd5faf2f 100644
--- a/runtime/interpreter/mterp/x86/op_const_16.S
+++ b/runtime/interpreter/mterp/x86/op_const_16.S
@@ -1,4 +1,4 @@
/* const/16 vAA, #+BBBB */
movswl 2(rPC), %ecx # ecx <- ssssBBBB
- SET_VREG %ecx rINST # vAA <- ssssBBBB
+ SET_VREG %ecx, rINST # vAA <- ssssBBBB
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_4.S b/runtime/interpreter/mterp/x86/op_const_4.S
index c336411549..a60ba96c5a 100644
--- a/runtime/interpreter/mterp/x86/op_const_4.S
+++ b/runtime/interpreter/mterp/x86/op_const_4.S
@@ -3,5 +3,5 @@
movl $$0xf, rINST
andl %eax, rINST # rINST <- A
sarl $$4, %eax
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_const_class.S b/runtime/interpreter/mterp/x86/op_const_class.S
index eceb8bc60b..343e110f71 100644
--- a/runtime/interpreter/mterp/x86/op_const_class.S
+++ b/runtime/interpreter/mterp/x86/op_const_class.S
@@ -7,7 +7,7 @@
movl %eax, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call MterpConstClass # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstClass) # (index, tgt_reg, shadow_frame, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_const_high16.S b/runtime/interpreter/mterp/x86/op_const_high16.S
index da78d1b63e..576967af99 100644
--- a/runtime/interpreter/mterp/x86/op_const_high16.S
+++ b/runtime/interpreter/mterp/x86/op_const_high16.S
@@ -1,5 +1,5 @@
/* const/high16 vAA, #+BBBB0000 */
movzwl 2(rPC), %eax # eax <- 0000BBBB
sall $$16, %eax # eax <- BBBB0000
- SET_VREG %eax rINST # vAA <- eax
+ SET_VREG %eax, rINST # vAA <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_string.S b/runtime/interpreter/mterp/x86/op_const_string.S
index 9acd6fe76c..bbac69ced4 100644
--- a/runtime/interpreter/mterp/x86/op_const_string.S
+++ b/runtime/interpreter/mterp/x86/op_const_string.S
@@ -7,7 +7,7 @@
movl %eax, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call MterpConstString # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_const_string_jumbo.S b/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
index 5c728b269d..4236807dd3 100644
--- a/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
+++ b/runtime/interpreter/mterp/x86/op_const_string_jumbo.S
@@ -7,7 +7,7 @@
movl %eax, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call MterpConstString # (index, tgt_reg, shadow_frame, self)
+ call SYMBOL(MterpConstString) # (index, tgt_reg, shadow_frame, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_const_wide.S b/runtime/interpreter/mterp/x86/op_const_wide.S
index 745490ea83..3750728128 100644
--- a/runtime/interpreter/mterp/x86/op_const_wide.S
+++ b/runtime/interpreter/mterp/x86/op_const_wide.S
@@ -2,6 +2,6 @@
movl 2(rPC), %eax # eax <- lsw
movzbl rINSTbl, %ecx # ecx <- AA
movl 6(rPC), rINST # rINST <- msw
- SET_VREG %eax %ecx
- SET_VREG_HIGH rINST %ecx
+ SET_VREG %eax, %ecx
+ SET_VREG_HIGH rINST, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_16.S b/runtime/interpreter/mterp/x86/op_const_wide_16.S
index 8029cfe80b..1331c329dc 100644
--- a/runtime/interpreter/mterp/x86/op_const_wide_16.S
+++ b/runtime/interpreter/mterp/x86/op_const_wide_16.S
@@ -2,7 +2,7 @@
movswl 2(rPC), %eax # eax <- ssssBBBB
movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE rINST # store msw
- SET_VREG %eax rINST # store lsw
+ SET_VREG_HIGH rIBASE, rINST # store msw
+ SET_VREG %eax, rINST # store lsw
movl %ecx, rIBASE # restore rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_32.S b/runtime/interpreter/mterp/x86/op_const_wide_32.S
index 3e23d3a106..ed7d62b396 100644
--- a/runtime/interpreter/mterp/x86/op_const_wide_32.S
+++ b/runtime/interpreter/mterp/x86/op_const_wide_32.S
@@ -2,7 +2,7 @@
movl 2(rPC), %eax # eax <- BBBBbbbb
movl rIBASE, %ecx # preserve rIBASE (cltd trashes it)
cltd # rIBASE:eax <- ssssssssssssBBBB
- SET_VREG_HIGH rIBASE rINST # store msw
- SET_VREG %eax rINST # store lsw
+ SET_VREG_HIGH rIBASE, rINST # store msw
+ SET_VREG %eax, rINST # store lsw
movl %ecx, rIBASE # restore rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_const_wide_high16.S b/runtime/interpreter/mterp/x86/op_const_wide_high16.S
index d2a11191b7..11b9310be5 100644
--- a/runtime/interpreter/mterp/x86/op_const_wide_high16.S
+++ b/runtime/interpreter/mterp/x86/op_const_wide_high16.S
@@ -1,7 +1,7 @@
/* const-wide/high16 vAA, #+BBBB000000000000 */
movzwl 2(rPC), %eax # eax <- 0000BBBB
sall $$16, %eax # eax <- BBBB0000
- SET_VREG_HIGH %eax rINST # v[AA+1] <- eax
+ SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
xorl %eax, %eax
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_div_long.S b/runtime/interpreter/mterp/x86/op_div_long.S
index 577282686d..e56a035f1d 100644
--- a/runtime/interpreter/mterp/x86/op_div_long.S
+++ b/runtime/interpreter/mterp/x86/op_div_long.S
@@ -7,17 +7,17 @@
mov rIBASE, LOCAL0(%esp) # save rIBASE/%edx
mov rINST, LOCAL1(%esp) # save rINST/%ebx
movzbl 3(rPC), %eax # eax <- CC
- GET_VREG %ecx %eax
- GET_VREG_HIGH %ebx %eax
+ GET_VREG %ecx, %eax
+ GET_VREG_HIGH %ebx, %eax
movl %ecx, %edx
orl %ebx, %ecx
jz common_errDivideByZero
movzbl 2(rPC), %eax # eax <- BB
- GET_VREG_HIGH %ecx %eax
- GET_VREG %eax %eax
- call $routine
+ GET_VREG_HIGH %ecx, %eax
+ GET_VREG %eax, %eax
+ call SYMBOL($routine)
mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE rINST
- SET_VREG %eax rINST
+ SET_VREG_HIGH rIBASE, rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_div_long_2addr.S b/runtime/interpreter/mterp/x86/op_div_long_2addr.S
index 26960420cb..159cc44444 100644
--- a/runtime/interpreter/mterp/x86/op_div_long_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_div_long_2addr.S
@@ -10,16 +10,16 @@
andb $$0xf, rINSTbl # rINST <- A
mov rINST, LOCAL1(%esp) # save rINST/%ebx
movl %ebx, %ecx
- GET_VREG %edx %eax
- GET_VREG_HIGH %ebx %eax
+ GET_VREG %edx, %eax
+ GET_VREG_HIGH %ebx, %eax
movl %edx, %eax
orl %ebx, %eax
jz common_errDivideByZero
- GET_VREG %eax %ecx
- GET_VREG_HIGH %ecx %ecx
- call $routine
+ GET_VREG %eax, %ecx
+ GET_VREG_HIGH %ecx, %ecx
+ call SYMBOL($routine)
mov LOCAL1(%esp), rINST # restore rINST/%ebx
- SET_VREG_HIGH rIBASE rINST
- SET_VREG %eax rINST
+ SET_VREG_HIGH rIBASE, rINST
+ SET_VREG %eax, rINST
mov LOCAL0(%esp), rIBASE # restore rIBASE/%edx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_fill_array_data.S b/runtime/interpreter/mterp/x86/op_fill_array_data.S
index 0cb05f6cf7..004aed9872 100644
--- a/runtime/interpreter/mterp/x86/op_fill_array_data.S
+++ b/runtime/interpreter/mterp/x86/op_fill_array_data.S
@@ -2,10 +2,10 @@
EXPORT_PC
movl 2(rPC), %ecx # ecx <- BBBBbbbb
leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
- GET_VREG %eax rINST # eax <- vAA (array object)
+ GET_VREG %eax, rINST # eax <- vAA (array object)
movl %eax, OUT_ARG0(%esp)
movl %ecx, OUT_ARG1(%esp)
- call MterpFillArrayData # (obj, payload)
+ call SYMBOL(MterpFillArrayData) # (obj, payload)
REFRESH_IBASE
testl %eax, %eax # 0 means an exception is thrown
jz MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_filled_new_array.S b/runtime/interpreter/mterp/x86/op_filled_new_array.S
index c08b09f4d2..a2bac29bc1 100644
--- a/runtime/interpreter/mterp/x86/op_filled_new_array.S
+++ b/runtime/interpreter/mterp/x86/op_filled_new_array.S
@@ -13,7 +13,7 @@
movl rPC, OUT_ARG1(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp)
- call $helper
+ call SYMBOL($helper)
REFRESH_IBASE
testl %eax, %eax # 0 means an exception is thrown
jz MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_iget.S b/runtime/interpreter/mterp/x86/op_iget.S
index 868ffd0a5c..99326105c0 100644
--- a/runtime/interpreter/mterp/x86/op_iget.S
+++ b/runtime/interpreter/mterp/x86/op_iget.S
@@ -15,15 +15,15 @@
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call $helper
+ call SYMBOL($helper)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $$0xf, rINSTbl # rINST <- A
.if $is_object
- SET_VREG_OBJECT %eax rINST # fp[A] <-value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <-value
.else
- SET_VREG %eax rINST # fp[A] <-value
+ SET_VREG %eax, rINST # fp[A] <-value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_object_quick.S b/runtime/interpreter/mterp/x86/op_iget_object_quick.S
index b09772f720..fe166948c9 100644
--- a/runtime/interpreter/mterp/x86/op_iget_object_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iget_object_quick.S
@@ -2,16 +2,16 @@
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
movl %ecx, OUT_ARG0(%esp)
movl %eax, OUT_ARG1(%esp)
EXPORT_PC
- call artIGetObjectFromMterp # (obj, offset)
+ call SYMBOL(artIGetObjectFromMterp) # (obj, offset)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $$0xf,rINSTbl # rINST <- A
- SET_VREG_OBJECT %eax rINST # fp[A] <- value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_quick.S b/runtime/interpreter/mterp/x86/op_iget_quick.S
index 372071ce9c..1b7440fc9a 100644
--- a/runtime/interpreter/mterp/x86/op_iget_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iget_quick.S
@@ -3,11 +3,11 @@
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
testl %ecx, %ecx # is object null?
je common_errNullObject
${load} (%ecx,%eax,1), %eax
andb $$0xf,rINSTbl # rINST <- A
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide.S b/runtime/interpreter/mterp/x86/op_iget_wide.S
index 58e5a65a89..92126b4473 100644
--- a/runtime/interpreter/mterp/x86/op_iget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_iget_wide.S
@@ -14,12 +14,12 @@
movl %eax, OUT_ARG2(%esp) # referrer
mov rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artGet64InstanceFromCode
+ call SYMBOL(artGet64InstanceFromCode)
mov rSELF, %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException # bail out
andb $$0xf, rINSTbl # rINST <- A
- SET_VREG %eax rINST
- SET_VREG_HIGH %edx rINST
+ SET_VREG %eax, rINST
+ SET_VREG_HIGH %edx, rINST
REFRESH_IBASE_FROM_SELF %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iget_wide_quick.S b/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
index 8be336be73..7ce74cc71b 100644
--- a/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iget_wide_quick.S
@@ -1,11 +1,11 @@
/* iget-wide-quick vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
movzwl 2(rPC), %eax # eax <- field byte offset
testl %ecx, %ecx # is object null?
je common_errNullObject
movq (%ecx,%eax,1), %xmm0
andb $$0xf, rINSTbl # rINST <- A
- SET_WIDE_FP_VREG %xmm0 rINST
+ SET_WIDE_FP_VREG %xmm0, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_instance_of.S b/runtime/interpreter/mterp/x86/op_instance_of.S
index c9bfba5c36..fd5bf44c78 100644
--- a/runtime/interpreter/mterp/x86/op_instance_of.S
+++ b/runtime/interpreter/mterp/x86/op_instance_of.S
@@ -16,11 +16,11 @@
movl %eax, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call MterpInstanceOf # (index, &obj, method, self)
+ call SYMBOL(MterpInstanceOf) # (index, &obj, method, self)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
andb $$0xf, rINSTbl # rINSTbl <- A
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_int_to_long.S b/runtime/interpreter/mterp/x86/op_int_to_long.S
index 736ea69869..6f9ea269f4 100644
--- a/runtime/interpreter/mterp/x86/op_int_to_long.S
+++ b/runtime/interpreter/mterp/x86/op_int_to_long.S
@@ -1,12 +1,12 @@
/* int to long vA, vB */
movzbl rINSTbl, %eax # eax <- +A
sarl $$4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
andb $$0xf, rINSTbl # rINST <- A
movl rIBASE, %ecx # cltd trashes rIBASE/edx
cltd # rINST:eax<- sssssssBBBBBBBB
- SET_VREG_HIGH rIBASE rINST # v[A+1] <- rIBASE
- SET_VREG %eax rINST # v[A+0] <- %eax
+ SET_VREG_HIGH rIBASE, rINST # v[A+1] <- rIBASE
+ SET_VREG %eax, rINST # v[A+0] <- %eax
movl %ecx, rIBASE
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_iput.S b/runtime/interpreter/mterp/x86/op_iput.S
index f8a65499d7..13cfe5ca69 100644
--- a/runtime/interpreter/mterp/x86/op_iput.S
+++ b/runtime/interpreter/mterp/x86/op_iput.S
@@ -18,7 +18,7 @@
movl %eax, OUT_ARG2(%esp) # fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call $handler
+ call SYMBOL($handler)
testl %eax, %eax
jnz MterpPossibleException
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_iput_object.S b/runtime/interpreter/mterp/x86/op_iput_object.S
index 20d57aa35e..f63075c503 100644
--- a/runtime/interpreter/mterp/x86/op_iput_object.S
+++ b/runtime/interpreter/mterp/x86/op_iput_object.S
@@ -6,7 +6,7 @@
movl rINST, OUT_ARG2(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG3(%esp)
- call MterpIputObject
+ call SYMBOL(MterpIputObject)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_iput_object_quick.S b/runtime/interpreter/mterp/x86/op_iput_object_quick.S
index 4c7f4bd0cf..d54b1b772f 100644
--- a/runtime/interpreter/mterp/x86/op_iput_object_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iput_object_quick.S
@@ -4,7 +4,7 @@
movl rPC, OUT_ARG1(%esp)
REFRESH_INST ${opnum}
movl rINST, OUT_ARG2(%esp)
- call MterpIputObjectQuick
+ call SYMBOL(MterpIputObjectQuick)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_iput_quick.S b/runtime/interpreter/mterp/x86/op_iput_quick.S
index e2f7caf936..b67cee0859 100644
--- a/runtime/interpreter/mterp/x86/op_iput_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iput_quick.S
@@ -3,11 +3,11 @@
/* op vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx <- BA
sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
testl %ecx, %ecx # is object null?
je common_errNullObject
andb $$0xf, rINSTbl # rINST <- A
- GET_VREG rINST rINST # rINST <- v[A]
+ GET_VREG rINST, rINST # rINST <- v[A]
movzwl 2(rPC), %eax # eax <- field byte offset
${store} ${reg}, (%ecx,%eax,1)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_iput_wide.S b/runtime/interpreter/mterp/x86/op_iput_wide.S
index 92cb770201..573e14d663 100644
--- a/runtime/interpreter/mterp/x86/op_iput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_iput_wide.S
@@ -12,7 +12,7 @@
movl %eax, OUT_ARG2(%esp) # &fp[A]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG3(%esp) # referrer
- call artSet64InstanceFromMterp
+ call SYMBOL(artSet64InstanceFromMterp)
testl %eax, %eax
jnz MterpPossibleException
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_iput_wide_quick.S b/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
index 72285c5a5b..17de6f8502 100644
--- a/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
+++ b/runtime/interpreter/mterp/x86/op_iput_wide_quick.S
@@ -1,12 +1,12 @@
/* iput-wide-quick vA, vB, offset@CCCC */
movzbl rINSTbl, %ecx # ecx<- BA
sarl $$4, %ecx # ecx<- B
- GET_VREG %ecx %ecx # vB (object we're operating on)
+ GET_VREG %ecx, %ecx # vB (object we're operating on)
testl %ecx, %ecx # is object null?
je common_errNullObject
movzwl 2(rPC), %eax # eax<- field byte offset
leal (%ecx,%eax,1), %ecx # ecx<- Address of 64-bit target
andb $$0xf, rINSTbl # rINST<- A
- GET_WIDE_FP_VREG %xmm0 rINST # xmm0<- fp[A]/fp[A+1]
+ GET_WIDE_FP_VREG %xmm0, rINST # xmm0<- fp[A]/fp[A+1]
movq %xmm0, (%ecx) # obj.field<- r0/r1
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_monitor_enter.S b/runtime/interpreter/mterp/x86/op_monitor_enter.S
index 8236fb3420..9e885bde93 100644
--- a/runtime/interpreter/mterp/x86/op_monitor_enter.S
+++ b/runtime/interpreter/mterp/x86/op_monitor_enter.S
@@ -3,11 +3,11 @@
*/
/* monitor-enter vAA */
EXPORT_PC
- GET_VREG %ecx rINST
+ GET_VREG %ecx, rINST
movl %ecx, OUT_ARG0(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG1(%esp)
- call artLockObjectFromCode # (object, self)
+ call SYMBOL(artLockObjectFromCode) # (object, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpException
diff --git a/runtime/interpreter/mterp/x86/op_monitor_exit.S b/runtime/interpreter/mterp/x86/op_monitor_exit.S
index 56d4eb318e..090480056a 100644
--- a/runtime/interpreter/mterp/x86/op_monitor_exit.S
+++ b/runtime/interpreter/mterp/x86/op_monitor_exit.S
@@ -7,11 +7,11 @@
*/
/* monitor-exit vAA */
EXPORT_PC
- GET_VREG %ecx rINST
+ GET_VREG %ecx, rINST
movl %ecx, OUT_ARG0(%esp)
movl rSELF, %eax
movl %eax, OUT_ARG1(%esp)
- call artUnlockObjectFromCode # (object, self)
+ call SYMBOL(artUnlockObjectFromCode) # (object, self)
REFRESH_IBASE
testl %eax, %eax
jnz MterpException
diff --git a/runtime/interpreter/mterp/x86/op_move.S b/runtime/interpreter/mterp/x86/op_move.S
index 0a531be901..ea173b97d6 100644
--- a/runtime/interpreter/mterp/x86/op_move.S
+++ b/runtime/interpreter/mterp/x86/op_move.S
@@ -4,10 +4,10 @@
movzbl rINSTbl, %eax # eax <- BA
andb $$0xf, %al # eax <- A
shrl $$4, rINST # rINST <- B
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
.if $is_object
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_16.S b/runtime/interpreter/mterp/x86/op_move_16.S
index 0773f41109..454deb5d08 100644
--- a/runtime/interpreter/mterp/x86/op_move_16.S
+++ b/runtime/interpreter/mterp/x86/op_move_16.S
@@ -3,10 +3,10 @@
/* op vAAAA, vBBBB */
movzwl 4(rPC), %ecx # ecx <- BBBB
movzwl 2(rPC), %eax # eax <- AAAA
- GET_VREG rINST %ecx
+ GET_VREG rINST, %ecx
.if $is_object
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_move_exception.S b/runtime/interpreter/mterp/x86/op_move_exception.S
index e37cdfa877..d8dc74fdac 100644
--- a/runtime/interpreter/mterp/x86/op_move_exception.S
+++ b/runtime/interpreter/mterp/x86/op_move_exception.S
@@ -1,6 +1,6 @@
/* move-exception vAA */
movl rSELF, %ecx
movl THREAD_EXCEPTION_OFFSET(%ecx), %eax
- SET_VREG_OBJECT %eax rINST # fp[AA] <- exception object
+ SET_VREG_OBJECT %eax, rINST # fp[AA] <- exception object
movl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_from16.S b/runtime/interpreter/mterp/x86/op_move_from16.S
index 623a4d354f..e86985536e 100644
--- a/runtime/interpreter/mterp/x86/op_move_from16.S
+++ b/runtime/interpreter/mterp/x86/op_move_from16.S
@@ -3,10 +3,10 @@
/* op vAA, vBBBB */
movzx rINSTbl, %eax # eax <- AA
movw 2(rPC), rINSTw # rINSTw <- BBBB
- GET_VREG rINST rINST # rINST <- fp[BBBB]
+ GET_VREG rINST, rINST # rINST <- fp[BBBB]
.if $is_object
- SET_VREG_OBJECT rINST %eax # fp[A] <- fp[B]
+ SET_VREG_OBJECT rINST, %eax # fp[A] <- fp[B]
.else
- SET_VREG rINST %eax # fp[A] <- fp[B]
+ SET_VREG rINST, %eax # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_move_result.S b/runtime/interpreter/mterp/x86/op_move_result.S
index 414f2cb715..f6f2129f66 100644
--- a/runtime/interpreter/mterp/x86/op_move_result.S
+++ b/runtime/interpreter/mterp/x86/op_move_result.S
@@ -4,8 +4,8 @@
movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
movl (%eax), %eax # r0 <- result.i.
.if $is_object
- SET_VREG_OBJECT %eax rINST # fp[A] <- fp[B]
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- fp[B]
.else
- SET_VREG %eax rINST # fp[A] <- fp[B]
+ SET_VREG %eax, rINST # fp[A] <- fp[B]
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_result_wide.S b/runtime/interpreter/mterp/x86/op_move_result_wide.S
index 0c1683b45a..7818cceaf9 100644
--- a/runtime/interpreter/mterp/x86/op_move_result_wide.S
+++ b/runtime/interpreter/mterp/x86/op_move_result_wide.S
@@ -2,6 +2,6 @@
movl OFF_FP_RESULT_REGISTER(rFP), %eax # get pointer to result JType.
movl 4(%eax), %ecx # Get high
movl (%eax), %eax # Get low
- SET_VREG %eax rINST # v[AA+0] <- eax
- SET_VREG_HIGH %ecx rINST # v[AA+1] <- ecx
+ SET_VREG %eax, rINST # v[AA+0] <- eax
+ SET_VREG_HIGH %ecx, rINST # v[AA+1] <- ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_wide.S b/runtime/interpreter/mterp/x86/op_move_wide.S
index 9c0e9853ab..79ce7b77bc 100644
--- a/runtime/interpreter/mterp/x86/op_move_wide.S
+++ b/runtime/interpreter/mterp/x86/op_move_wide.S
@@ -3,6 +3,6 @@
movzbl rINSTbl, %ecx # ecx <- BA
sarl $$4, rINST # rINST <- B
andb $$0xf, %cl # ecx <- A
- GET_WIDE_FP_VREG %xmm0 rINST # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0 %ecx # v[A] <- xmm0
+ GET_WIDE_FP_VREG %xmm0, rINST # xmm0 <- v[B]
+ SET_WIDE_FP_VREG %xmm0, %ecx # v[A] <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_move_wide_16.S b/runtime/interpreter/mterp/x86/op_move_wide_16.S
index 7522c277a2..a6b8596b98 100644
--- a/runtime/interpreter/mterp/x86/op_move_wide_16.S
+++ b/runtime/interpreter/mterp/x86/op_move_wide_16.S
@@ -2,6 +2,6 @@
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
movzwl 4(rPC), %ecx # ecx<- BBBB
movzwl 2(rPC), %eax # eax<- AAAA
- GET_WIDE_FP_VREG %xmm0 %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0 %eax # v[A] <- xmm0
+ GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
+ SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
diff --git a/runtime/interpreter/mterp/x86/op_move_wide_from16.S b/runtime/interpreter/mterp/x86/op_move_wide_from16.S
index 5ad2cb44b5..ec344de95f 100644
--- a/runtime/interpreter/mterp/x86/op_move_wide_from16.S
+++ b/runtime/interpreter/mterp/x86/op_move_wide_from16.S
@@ -2,6 +2,6 @@
/* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
movzwl 2(rPC), %ecx # ecx <- BBBB
movzbl rINSTbl, %eax # eax <- AAAA
- GET_WIDE_FP_VREG %xmm0 %ecx # xmm0 <- v[B]
- SET_WIDE_FP_VREG %xmm0 %eax # v[A] <- xmm0
+ GET_WIDE_FP_VREG %xmm0, %ecx # xmm0 <- v[B]
+ SET_WIDE_FP_VREG %xmm0, %eax # v[A] <- xmm0
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int.S b/runtime/interpreter/mterp/x86/op_mul_int.S
index a367ab7102..77f4659d6a 100644
--- a/runtime/interpreter/mterp/x86/op_mul_int.S
+++ b/runtime/interpreter/mterp/x86/op_mul_int.S
@@ -4,9 +4,9 @@
/* mul vAA, vBB, vCC */
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
- GET_VREG %eax %eax # eax <- vBB
+ GET_VREG %eax, %eax # eax <- vBB
mov rIBASE, LOCAL0(%esp)
imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
mov LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_2addr.S b/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
index 60050759d0..f92a28e46a 100644
--- a/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_mul_int_2addr.S
@@ -1,10 +1,10 @@
/* mul vA, vB */
movzx rINSTbl, %ecx # ecx <- A+
sarl $$4, rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $$0xf, %cl # ecx <- A
mov rIBASE, LOCAL0(%esp)
imull (rFP,%ecx,4), %eax # trashes rIBASE/edx
mov LOCAL0(%esp), rIBASE
- SET_VREG %eax %ecx
+ SET_VREG %eax, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_lit16.S b/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
index 1c0fde3cd1..31ab613805 100644
--- a/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
+++ b/runtime/interpreter/mterp/x86/op_mul_int_lit16.S
@@ -2,11 +2,11 @@
/* Need A in rINST, ssssCCCC in ecx, vB in eax */
movzbl rINSTbl, %eax # eax <- 000000BA
sarl $$4, %eax # eax <- B
- GET_VREG %eax %eax # eax <- vB
+ GET_VREG %eax, %eax # eax <- vB
movswl 2(rPC), %ecx # ecx <- ssssCCCC
andb $$0xf, rINSTbl # rINST <- A
mov rIBASE, LOCAL0(%esp)
imull %ecx, %eax # trashes rIBASE/edx
mov LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_int_lit8.S b/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
index 4d7a22d898..6637aa7384 100644
--- a/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
+++ b/runtime/interpreter/mterp/x86/op_mul_int_lit8.S
@@ -1,9 +1,9 @@
/* mul/lit8 vAA, vBB, #+CC */
movzbl 2(rPC), %eax # eax <- BB
movsbl 3(rPC), %ecx # ecx <- ssssssCC
- GET_VREG %eax %eax # eax <- rBB
+ GET_VREG %eax, %eax # eax <- rBB
mov rIBASE, LOCAL0(%esp)
imull %ecx, %eax # trashes rIBASE/edx
mov LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST
+ SET_VREG %eax, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_mul_long.S b/runtime/interpreter/mterp/x86/op_mul_long.S
index 3746e41636..f35ca1372b 100644
--- a/runtime/interpreter/mterp/x86/op_mul_long.S
+++ b/runtime/interpreter/mterp/x86/op_mul_long.S
@@ -27,7 +27,7 @@
mov LOCAL0(%esp), rPC # restore Interpreter PC
mov LOCAL1(%esp), rFP # restore FP
leal (%ecx,rIBASE), rIBASE # full result now in rIBASE:%eax
- SET_VREG_HIGH rIBASE rINST # v[B+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[B+1] <- rIBASE
mov LOCAL2(%esp), rIBASE # restore IBASE
- SET_VREG %eax rINST # v[B] <- eax
+ SET_VREG %eax, rINST # v[B] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_neg_long.S b/runtime/interpreter/mterp/x86/op_neg_long.S
index 7cc17f019f..30da247208 100644
--- a/runtime/interpreter/mterp/x86/op_neg_long.S
+++ b/runtime/interpreter/mterp/x86/op_neg_long.S
@@ -2,12 +2,12 @@
movzbl rINSTbl, %ecx # ecx <- BA
sarl $$4, %ecx # ecx <- B
andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx %ecx # ecx <- v[B+1]
+ GET_VREG %eax, %ecx # eax <- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
negl %eax
adcl $$0, %ecx
negl %ecx
- SET_VREG %eax rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx rINST # v[A+1] <- ecx
+ SET_VREG %eax, rINST # v[A+0] <- eax
+ SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_new_array.S b/runtime/interpreter/mterp/x86/op_new_array.S
index 68521830b3..24904774e1 100644
--- a/runtime/interpreter/mterp/x86/op_new_array.S
+++ b/runtime/interpreter/mterp/x86/op_new_array.S
@@ -14,7 +14,7 @@
movl rINST, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call MterpNewArray
+ call SYMBOL(MterpNewArray)
REFRESH_IBASE
testl %eax, %eax # 0 means an exception is thrown
jz MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_new_instance.S b/runtime/interpreter/mterp/x86/op_new_instance.S
index a3632e8c1f..712a5ebe96 100644
--- a/runtime/interpreter/mterp/x86/op_new_instance.S
+++ b/runtime/interpreter/mterp/x86/op_new_instance.S
@@ -9,7 +9,7 @@
movl %ecx, OUT_ARG1(%esp)
REFRESH_INST ${opnum}
movl rINST, OUT_ARG2(%esp)
- call MterpNewInstance
+ call SYMBOL(MterpNewInstance)
REFRESH_IBASE
testl %eax, %eax # 0 means an exception is thrown
jz MterpPossibleException
diff --git a/runtime/interpreter/mterp/x86/op_not_long.S b/runtime/interpreter/mterp/x86/op_not_long.S
index 55666a13ba..8f706e13be 100644
--- a/runtime/interpreter/mterp/x86/op_not_long.S
+++ b/runtime/interpreter/mterp/x86/op_not_long.S
@@ -2,10 +2,10 @@
movzbl rINSTbl, %ecx # ecx <- BA
sarl $$4, %ecx # ecx <- B
andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax %ecx # eax <- v[B+0]
- GET_VREG_HIGH %ecx %ecx # ecx <- v[B+1]
+ GET_VREG %eax, %ecx # eax <- v[B+0]
+ GET_VREG_HIGH %ecx, %ecx # ecx <- v[B+1]
notl %eax
notl %ecx
- SET_VREG %eax rINST # v[A+0] <- eax
- SET_VREG_HIGH %ecx rINST # v[A+1] <- ecx
+ SET_VREG %eax, rINST # v[A+0] <- eax
+ SET_VREG_HIGH %ecx, rINST # v[A+1] <- ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_packed_switch.S b/runtime/interpreter/mterp/x86/op_packed_switch.S
index 4e39a48444..230b58e02b 100644
--- a/runtime/interpreter/mterp/x86/op_packed_switch.S
+++ b/runtime/interpreter/mterp/x86/op_packed_switch.S
@@ -10,11 +10,11 @@
*/
/* op vAA, +BBBB */
movl 2(rPC), %ecx # ecx <- BBBBbbbb
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
leal (rPC,%ecx,2), %ecx # ecx <- PC + BBBBbbbb*2
movl %eax, OUT_ARG1(%esp) # ARG1 <- vAA
movl %ecx, OUT_ARG0(%esp) # ARG0 <- switchData
- call $func
+ call SYMBOL($func)
addl %eax, %eax
leal (rPC, %eax), rPC
FETCH_INST
diff --git a/runtime/interpreter/mterp/x86/op_return.S b/runtime/interpreter/mterp/x86/op_return.S
index 183b3bfdd5..8e3cfad380 100644
--- a/runtime/interpreter/mterp/x86/op_return.S
+++ b/runtime/interpreter/mterp/x86/op_return.S
@@ -5,13 +5,13 @@
*/
/* op vAA */
.extern MterpThreadFenceForConstructor
- call MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
1:
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
xorl %ecx, %ecx
jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_return_void.S b/runtime/interpreter/mterp/x86/op_return_void.S
index f3e24c7990..a14a4f6394 100644
--- a/runtime/interpreter/mterp/x86/op_return_void.S
+++ b/runtime/interpreter/mterp/x86/op_return_void.S
@@ -1,10 +1,10 @@
.extern MterpThreadFenceForConstructor
- call MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
1:
xorl %eax, %eax
xorl %ecx, %ecx
diff --git a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
index add4e203fb..1d0e93331b 100644
--- a/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
+++ b/runtime/interpreter/mterp/x86/op_return_void_no_barrier.S
@@ -2,7 +2,7 @@
testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
1:
xorl %eax, %eax
xorl %ecx, %ecx
diff --git a/runtime/interpreter/mterp/x86/op_return_wide.S b/runtime/interpreter/mterp/x86/op_return_wide.S
index 34a3380326..7d1850a962 100644
--- a/runtime/interpreter/mterp/x86/op_return_wide.S
+++ b/runtime/interpreter/mterp/x86/op_return_wide.S
@@ -3,13 +3,13 @@
*/
/* return-wide vAA */
.extern MterpThreadFenceForConstructor
- call MterpThreadFenceForConstructor
+ call SYMBOL(MterpThreadFenceForConstructor)
movl rSELF, %eax
testl $$(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
jz 1f
movl %eax, OUT_ARG0(%esp)
- call MterpSuspendCheck
+ call SYMBOL(MterpSuspendCheck)
1:
- GET_VREG %eax rINST # eax <- v[AA+0]
- GET_VREG_HIGH %ecx rINST # ecx <- v[AA+1]
+ GET_VREG %eax, rINST # eax <- v[AA+0]
+ GET_VREG_HIGH %ecx, rINST # ecx <- v[AA+1]
jmp MterpReturn
diff --git a/runtime/interpreter/mterp/x86/op_sget.S b/runtime/interpreter/mterp/x86/op_sget.S
index ed5aedf719..ec964581af 100644
--- a/runtime/interpreter/mterp/x86/op_sget.S
+++ b/runtime/interpreter/mterp/x86/op_sget.S
@@ -13,14 +13,14 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call $helper
+ call SYMBOL($helper)
movl rSELF, %ecx
REFRESH_IBASE_FROM_SELF %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
.if $is_object
- SET_VREG_OBJECT %eax rINST # fp[A] <- value
+ SET_VREG_OBJECT %eax, rINST # fp[A] <- value
.else
- SET_VREG %eax rINST # fp[A] <- value
+ SET_VREG %eax, rINST # fp[A] <- value
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_sget_wide.S b/runtime/interpreter/mterp/x86/op_sget_wide.S
index 76b993bf54..833f266dd5 100644
--- a/runtime/interpreter/mterp/x86/op_sget_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sget_wide.S
@@ -11,11 +11,11 @@
movl %eax, OUT_ARG1(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG2(%esp) # self
- call artGet64StaticFromCode
+ call SYMBOL(artGet64StaticFromCode)
movl rSELF, %ecx
cmpl $$0, THREAD_EXCEPTION_OFFSET(%ecx)
jnz MterpException
- SET_VREG %eax rINST # fp[A]<- low part
- SET_VREG_HIGH %edx rINST # fp[A+1]<- high part
+ SET_VREG %eax, rINST # fp[A]<- low part
+ SET_VREG_HIGH %edx, rINST # fp[A+1]<- high part
REFRESH_IBASE_FROM_SELF %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shl_long.S b/runtime/interpreter/mterp/x86/op_shl_long.S
index 56d13e3168..aa58a93f9c 100644
--- a/runtime/interpreter/mterp/x86/op_shl_long.S
+++ b/runtime/interpreter/mterp/x86/op_shl_long.S
@@ -13,9 +13,9 @@
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE %eax # ecx <- v[BB+1]
- GET_VREG %ecx %ecx # ecx <- vCC
- GET_VREG %eax %eax # eax <- v[BB+0]
+ GET_VREG_HIGH rIBASE, %eax # ecx <- v[BB+1]
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- v[BB+0]
shldl %eax,rIBASE
sall %cl, %eax
testb $$32, %cl
@@ -23,7 +23,7 @@
movl %eax, rIBASE
xorl %eax, %eax
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- %eax
+ SET_VREG %eax, rINST # v[AA+0] <- %eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shl_long_2addr.S b/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
index 5da873f9b1..6bbf49ca69 100644
--- a/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_shl_long_2addr.S
@@ -8,11 +8,11 @@
/* rINSTw gets AA */
movzbl rINSTbl, %ecx # ecx <- BA
andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- v[AA+0]
+ GET_VREG %eax, rINST # eax <- v[AA+0]
sarl $$4, %ecx # ecx <- B
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx %ecx # ecx <- vBB
+ GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
+ GET_VREG %ecx, %ecx # ecx <- vBB
shldl %eax, rIBASE
sall %cl, %eax
testb $$32, %cl
@@ -20,7 +20,7 @@
movl %eax, rIBASE
xorl %eax, %eax
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_shr_long.S b/runtime/interpreter/mterp/x86/op_shr_long.S
index 4490a9ad73..68aa0ee837 100644
--- a/runtime/interpreter/mterp/x86/op_shr_long.S
+++ b/runtime/interpreter/mterp/x86/op_shr_long.S
@@ -13,9 +13,9 @@
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE %eax # rIBASE<- v[BB+1]
- GET_VREG %ecx %ecx # ecx <- vCC
- GET_VREG %eax %eax # eax <- v[BB+0]
+ GET_VREG_HIGH rIBASE, %eax # rIBASE<- v[BB+1]
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- v[BB+0]
shrdl rIBASE, %eax
sarl %cl, rIBASE
testb $$32, %cl
@@ -23,7 +23,7 @@
movl rIBASE, %eax
sarl $$31, rIBASE
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_shr_long_2addr.S b/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
index 57494f9ac0..148bd1b9eb 100644
--- a/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_shr_long_2addr.S
@@ -8,11 +8,11 @@
/* rINSTw gets AA */
movzbl rINSTbl, %ecx # ecx <- BA
andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- v[AA+0]
+ GET_VREG %eax, rINST # eax <- v[AA+0]
sarl $$4, %ecx # ecx <- B
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx %ecx # ecx <- vBB
+ GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
+ GET_VREG %ecx, %ecx # ecx <- vBB
shrdl rIBASE, %eax
sarl %cl, rIBASE
testb $$32, %cl
@@ -20,7 +20,7 @@
movl rIBASE, %eax
sarl $$31, rIBASE
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/op_sput.S b/runtime/interpreter/mterp/x86/op_sput.S
index 04a8f23f65..a199281088 100644
--- a/runtime/interpreter/mterp/x86/op_sput.S
+++ b/runtime/interpreter/mterp/x86/op_sput.S
@@ -9,13 +9,13 @@
EXPORT_PC
movzwl 2(rPC), %eax
movl %eax, OUT_ARG0(%esp) # field ref BBBB
- GET_VREG rINST rINST
+ GET_VREG rINST, rINST
movl rINST, OUT_ARG1(%esp) # fp[AA]
movl OFF_FP_METHOD(rFP), %eax
movl %eax, OUT_ARG2(%esp) # referrer
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call $helper
+ call SYMBOL($helper)
testl %eax, %eax
jnz MterpException
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_sput_object.S b/runtime/interpreter/mterp/x86/op_sput_object.S
index 0480e00cfd..e3e57fc87b 100644
--- a/runtime/interpreter/mterp/x86/op_sput_object.S
+++ b/runtime/interpreter/mterp/x86/op_sput_object.S
@@ -6,7 +6,7 @@
movl rINST, OUT_ARG2(%esp)
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp)
- call MterpSputObject
+ call SYMBOL(MterpSputObject)
testl %eax, %eax
jz MterpException
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_sput_wide.S b/runtime/interpreter/mterp/x86/op_sput_wide.S
index d58d5af1a6..7544838d52 100644
--- a/runtime/interpreter/mterp/x86/op_sput_wide.S
+++ b/runtime/interpreter/mterp/x86/op_sput_wide.S
@@ -13,7 +13,7 @@
movl %eax, OUT_ARG2(%esp) # &fp[AA]
movl rSELF, %ecx
movl %ecx, OUT_ARG3(%esp) # self
- call artSet64IndirectStaticFromMterp
+ call SYMBOL(artSet64IndirectStaticFromMterp)
testl %eax, %eax
jnz MterpException
REFRESH_IBASE
diff --git a/runtime/interpreter/mterp/x86/op_throw.S b/runtime/interpreter/mterp/x86/op_throw.S
index 15b20b56be..a6e6b1ed56 100644
--- a/runtime/interpreter/mterp/x86/op_throw.S
+++ b/runtime/interpreter/mterp/x86/op_throw.S
@@ -3,7 +3,7 @@
*/
/* throw vAA */
EXPORT_PC
- GET_VREG %eax rINST # eax<- vAA (exception object)
+ GET_VREG %eax, rINST # eax<- vAA (exception object)
testl %eax, %eax
jz common_errNullObject
movl rSELF,%ecx
diff --git a/runtime/interpreter/mterp/x86/op_ushr_long.S b/runtime/interpreter/mterp/x86/op_ushr_long.S
index 287946ebde..9527c9c2e4 100644
--- a/runtime/interpreter/mterp/x86/op_ushr_long.S
+++ b/runtime/interpreter/mterp/x86/op_ushr_long.S
@@ -13,9 +13,9 @@
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE %eax # rIBASE <- v[BB+1]
- GET_VREG %ecx %ecx # ecx <- vCC
- GET_VREG %eax %eax # eax <- v[BB+0]
+ GET_VREG_HIGH rIBASE, %eax # rIBASE <- v[BB+1]
+ GET_VREG %ecx, %ecx # ecx <- vCC
+ GET_VREG %eax, %eax # eax <- v[BB+0]
shrdl rIBASE, %eax
shrl %cl, rIBASE
testb $$32, %cl
@@ -23,7 +23,7 @@
movl rIBASE, %eax
xorl rIBASE, rIBASE
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[BB+0] <- eax
+ SET_VREG %eax, rINST # v[BB+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
diff --git a/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S b/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
index 39c27242c4..72fcc36fff 100644
--- a/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
+++ b/runtime/interpreter/mterp/x86/op_ushr_long_2addr.S
@@ -8,11 +8,11 @@
/* rINSTw gets AA */
movzbl rINSTbl, %ecx # ecx <- BA
andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- v[AA+0]
+ GET_VREG %eax, rINST # eax <- v[AA+0]
sarl $$4, %ecx # ecx <- B
movl rIBASE, LOCAL0(%esp)
- GET_VREG_HIGH rIBASE rINST # rIBASE <- v[AA+1]
- GET_VREG %ecx %ecx # ecx <- vBB
+ GET_VREG_HIGH rIBASE, rINST # rIBASE <- v[AA+1]
+ GET_VREG %ecx, %ecx # ecx <- vBB
shrdl rIBASE, %eax
shrl %cl, rIBASE
testb $$32, %cl
@@ -20,7 +20,7 @@
movl rIBASE, %eax
xorl rIBASE, rIBASE
2:
- SET_VREG_HIGH rIBASE rINST # v[AA+1] <- rIBASE
+ SET_VREG_HIGH rIBASE, rINST # v[AA+1] <- rIBASE
movl LOCAL0(%esp), rIBASE
- SET_VREG %eax rINST # v[AA+0] <- eax
+ SET_VREG %eax, rINST # v[AA+0] <- eax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/shop2addr.S b/runtime/interpreter/mterp/x86/shop2addr.S
index 94d35452f6..96c9954d21 100644
--- a/runtime/interpreter/mterp/x86/shop2addr.S
+++ b/runtime/interpreter/mterp/x86/shop2addr.S
@@ -5,9 +5,9 @@
/* shift/2addr vA, vB */
movzx rINSTbl, %ecx # eax <- BA
sarl $$4, %ecx # ecx <- B
- GET_VREG %ecx %ecx # eax <- vBB
+ GET_VREG %ecx, %ecx # eax <- vBB
andb $$0xf, rINSTbl # rINST <- A
- GET_VREG %eax rINST # eax <- vAA
+ GET_VREG %eax, rINST # eax <- vAA
$instr # ex: sarl %cl, %eax
- SET_VREG $result rINST
+ SET_VREG $result, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/unop.S b/runtime/interpreter/mterp/x86/unop.S
index 00d3e156fc..db09fc0487 100644
--- a/runtime/interpreter/mterp/x86/unop.S
+++ b/runtime/interpreter/mterp/x86/unop.S
@@ -6,8 +6,8 @@
/* unop vA, vB */
movzbl rINSTbl,%ecx # ecx <- A+
sarl $$4,rINST # rINST <- B
- GET_VREG %eax rINST # eax <- vB
+ GET_VREG %eax, rINST # eax <- vB
andb $$0xf,%cl # ecx <- A
$instr
- SET_VREG %eax %ecx
+ SET_VREG %eax, %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index e38a684824..80c174c100 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -36,7 +36,7 @@
namespace art {
namespace jit {
-static constexpr bool kEnableOnStackReplacement = false;
+static constexpr bool kEnableOnStackReplacement = true;
JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
auto* jit_options = new JitOptions;
@@ -164,6 +164,7 @@ bool Jit::LoadCompiler(std::string* error_msg) {
bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
DCHECK(!method->IsRuntimeMethod());
+
// Don't compile the method if it has breakpoints.
if (Dbg::IsDebuggerActive() && Dbg::MethodHasAnyBreakpoints(method)) {
VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to breakpoint";
@@ -177,12 +178,15 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
return false;
}
- if (!code_cache_->NotifyCompilationOf(method, self, osr)) {
+ // If we get a request to compile a proxy method, we pass the actual Java method
+ // of that proxy method, as the compiler does not expect a proxy method.
+ ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
+ if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to code cache";
return false;
}
- bool success = jit_compile_method_(jit_compiler_handle_, method, self, osr);
- code_cache_->DoneCompiling(method, self);
+ bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
+ code_cache_->DoneCompiling(method_to_compile, self);
return success;
}
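The proxy handling above relies on an existing ArtMethod accessor rather than new machinery. A rough sketch of the intent, with the surrounding error handling elided:

    // GetInterfaceMethodIfProxy() returns the method itself when it is not a proxy,
    // so the common path is unchanged; for a proxy it yields the interface method
    // the proxy forwards to, which is what the compiler actually expects.
    ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*));
    bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
    code_cache_->DoneCompiling(method_to_compile, self);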
@@ -300,73 +304,91 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
return false;
}
- const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method);
- if (osr_method == nullptr) {
- // No osr method yet, just return to the interpreter.
- return false;
- }
-
+  // Fetch some data before looking up an OSR method, as we don't want thread
+ // suspension once we hold an OSR method.
const size_t number_of_vregs = method->GetCodeItem()->registers_size_;
- CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
-
- // Find stack map starting at the target dex_pc.
- StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
- if (!stack_map.IsValid()) {
- // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
- // hope that the next branch has one.
- return false;
- }
+ const char* shorty = method->GetShorty();
+ std::string method_name(VLOG_IS_ON(jit) ? PrettyMethod(method) : "");
+ void** memory = nullptr;
+ size_t frame_size = 0;
+ ShadowFrame* shadow_frame = nullptr;
+ const uint8_t* native_pc = nullptr;
- // We found a stack map, now fill the frame with dex register values from the interpreter's
- // shadow frame.
- DexRegisterMap vreg_map =
- code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
-
- ShadowFrame* shadow_frame = thread->PopShadowFrame();
-
- size_t frame_size = osr_method->GetFrameSizeInBytes();
- void** memory = reinterpret_cast<void**>(malloc(frame_size));
- memset(memory, 0, frame_size);
-
- // Art ABI: ArtMethod is at the bottom of the stack.
- memory[0] = method;
-
- if (!vreg_map.IsValid()) {
- // If we don't have a dex register map, then there are no live dex registers at
- // this dex pc.
- } else {
- for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
- DexRegisterLocation::Kind location =
- vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
- if (location == DexRegisterLocation::Kind::kNone) {
- // Dex register is dead or unitialized.
- continue;
- }
+ {
+ ScopedAssertNoThreadSuspension sts(thread, "Holding OSR method");
+ const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method);
+ if (osr_method == nullptr) {
+ // No osr method yet, just return to the interpreter.
+ return false;
+ }
- if (location == DexRegisterLocation::Kind::kConstant) {
- // We skip constants because the compiled code knows how to handle them.
- continue;
- }
+ CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
- DCHECK(location == DexRegisterLocation::Kind::kInStack);
+ // Find stack map starting at the target dex_pc.
+ StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
+ if (!stack_map.IsValid()) {
+ // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
+ // hope that the next branch has one.
+ return false;
+ }
- int32_t vreg_value = shadow_frame->GetVReg(vreg);
- int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
- number_of_vregs,
- code_info,
- encoding);
- DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
- DCHECK_GT(slot_offset, 0);
- (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
+ // We found a stack map, now fill the frame with dex register values from the interpreter's
+ // shadow frame.
+ DexRegisterMap vreg_map =
+ code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+
+ frame_size = osr_method->GetFrameSizeInBytes();
+
+ // Allocate memory to put shadow frame values. The osr stub will copy that memory to
+ // stack.
+ // Note that we could pass the shadow frame to the stub, and let it copy the values there,
+ // but that is engineering complexity not worth the effort for something like OSR.
+ memory = reinterpret_cast<void**>(malloc(frame_size));
+ CHECK(memory != nullptr);
+ memset(memory, 0, frame_size);
+
+ // Art ABI: ArtMethod is at the bottom of the stack.
+ memory[0] = method;
+
+ shadow_frame = thread->PopShadowFrame();
+ if (!vreg_map.IsValid()) {
+ // If we don't have a dex register map, then there are no live dex registers at
+ // this dex pc.
+ } else {
+ for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
+ DexRegisterLocation::Kind location =
+ vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ if (location == DexRegisterLocation::Kind::kNone) {
+          // Dex register is dead or uninitialized.
+ continue;
+ }
+
+ if (location == DexRegisterLocation::Kind::kConstant) {
+ // We skip constants because the compiled code knows how to handle them.
+ continue;
+ }
+
+ DCHECK(location == DexRegisterLocation::Kind::kInStack);
+
+ int32_t vreg_value = shadow_frame->GetVReg(vreg);
+ int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
+ number_of_vregs,
+ code_info,
+ encoding);
+ DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
+ DCHECK_GT(slot_offset, 0);
+ (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
+ }
}
+
+ native_pc = stack_map.GetNativePcOffset(encoding) + osr_method->GetEntryPoint();
+ VLOG(jit) << "Jumping to "
+ << method_name
+ << "@"
+ << std::hex << reinterpret_cast<uintptr_t>(native_pc);
}
- const uint8_t* native_pc = stack_map.GetNativePcOffset(encoding) + osr_method->GetEntryPoint();
- VLOG(jit) << "Jumping to "
- << PrettyMethod(method)
- << "@"
- << std::hex << reinterpret_cast<uintptr_t>(native_pc);
{
ManagedStack fragment;
thread->PushManagedStackFragment(&fragment);
@@ -374,8 +396,9 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
frame_size,
native_pc,
result,
- method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty(),
+ shorty,
thread);
+
if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
thread->DeoptimizeWithDeoptimizationException(result);
}
@@ -383,7 +406,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
}
free(memory);
thread->PushShadowFrame(shadow_frame);
- VLOG(jit) << "Done running OSR code for " << PrettyMethod(method);
+ VLOG(jit) << "Done running OSR code for " << method_name;
return true;
}
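With kEnableOnStackReplacement now defaulting to true, the bulk of the jit.cc change is a scoping exercise: everything that depends on the looked-up OSR method (the stack map, the frame contents, the entry point) is computed inside a ScopedAssertNoThreadSuspension region, and only plain data escapes it. A reduced sketch of the resulting shape, with the lookup and copy details elided:

    const char* shorty = method->GetShorty();   // gathered before the no-suspension region
    std::string method_name(VLOG_IS_ON(jit) ? PrettyMethod(method) : "");
    void** memory = nullptr;
    size_t frame_size = 0;
    const uint8_t* native_pc = nullptr;
    {
      ScopedAssertNoThreadSuspension sts(thread, "Holding OSR method");
      // Look up the OSR method header and the stack map for dex_pc + dex_pc_offset;
      // bail out to the interpreter if either is missing. Otherwise malloc() frame_size
      // bytes, store the ArtMethod* at memory[0], copy each live dex register from the
      // popped shadow frame into its stack slot, and compute native_pc from the stack
      // map offset plus the OSR method entry point.
    }
    // Outside the region it is safe to suspend again: push a ManagedStack fragment and
    // enter the compiled code at native_pc, handing it memory, frame_size and shorty.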
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 464c441e8e..9111ddf9f5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -578,7 +578,7 @@ void JitCodeCache::GarbageCollectCache(Thread* self) {
}
}
- // Empty osr method map, as osr compile code will be deleted (except the ones
+ // Empty osr method map, as osr compiled code will be deleted (except the ones
// on thread stacks).
osr_code_map_.clear();
}
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index b4b872ff50..0aff1f7ec3 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -33,6 +33,21 @@
namespace art {
+// Transform the actual dex location into relative paths.
+// Note: this is OK because we don't store profiles of different apps into the same file.
+// Apps with split apks don't cause trouble because each split has a different name and will not
+// collide with other entries.
+std::string ProfileCompilationInfo::GetProfileDexFileKey(const std::string& dex_location) {
+ DCHECK(!dex_location.empty());
+ size_t last_sep_index = dex_location.find_last_of('/');
+ if (last_sep_index == std::string::npos) {
+ return dex_location;
+ } else {
+ DCHECK(last_sep_index < dex_location.size());
+ return dex_location.substr(last_sep_index + 1);
+ }
+}
+
bool ProfileCompilationInfo::SaveProfilingInfo(const std::string& filename,
const std::vector<ArtMethod*>& methods) {
if (methods.empty()) {
@@ -58,7 +73,7 @@ bool ProfileCompilationInfo::SaveProfilingInfo(const std::string& filename,
ScopedObjectAccess soa(Thread::Current());
for (auto it = methods.begin(); it != methods.end(); it++) {
const DexFile* dex_file = (*it)->GetDexFile();
- if (!info.AddData(dex_file->GetLocation(),
+ if (!info.AddData(GetProfileDexFileKey(dex_file->GetLocation()),
dex_file->GetLocationChecksum(),
(*it)->GetDexMethodIndex())) {
return false;
@@ -107,8 +122,8 @@ static constexpr const char kLineSeparator = '\n';
* dex_location1,dex_location_checksum1,method_id11,method_id12...
* dex_location2,dex_location_checksum2,method_id21,method_id22...
* e.g.
- * /system/priv-app/app/app.apk,131232145,11,23,454,54
- * /system/priv-app/app/app.apk:classes5.dex,218490184,39,13,49,1
+ * app.apk,131232145,11,23,454,54
+ * app.apk:classes5.dex,218490184,39,13,49,1
**/
bool ProfileCompilationInfo::Save(uint32_t fd) {
DCHECK_GE(fd, 0u);
@@ -270,7 +285,7 @@ bool ProfileCompilationInfo::Load(const ProfileCompilationInfo& other) {
}
bool ProfileCompilationInfo::ContainsMethod(const MethodReference& method_ref) const {
- auto info_it = info_.find(method_ref.dex_file->GetLocation());
+ auto info_it = info_.find(GetProfileDexFileKey(method_ref.dex_file->GetLocation()));
if (info_it != info_.end()) {
if (method_ref.dex_file->GetLocationChecksum() != info_it->second.checksum) {
return false;
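The new GetProfileDexFileKey() keeps only the last path component of a dex location, which is exactly what the updated file-format comment illustrates. A self-contained restatement of the transformation, using ProfileKeyOf as a hypothetical stand-in name:

    #include <string>

    // Sketch mirroring ProfileCompilationInfo::GetProfileDexFileKey() above.
    std::string ProfileKeyOf(const std::string& dex_location) {
      size_t last_sep = dex_location.find_last_of('/');
      return last_sep == std::string::npos ? dex_location
                                           : dex_location.substr(last_sep + 1);
    }

    // ProfileKeyOf("/system/priv-app/app/app.apk")              == "app.apk"
    // ProfileKeyOf("/system/priv-app/app/app.apk:classes5.dex") == "app.apk:classes5.dex"
    // ProfileKeyOf("app.apk")                                   == "app.apk" (no '/', returned unchanged)

Both SaveProfilingInfo() and ContainsMethod() now go through the same key, so entries written by one are found by the other even when the location strings differ only in their directory part.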
diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h
index ffd14335d7..c388c4a42f 100644
--- a/runtime/jit/offline_profiling_info.h
+++ b/runtime/jit/offline_profiling_info.h
@@ -66,6 +66,8 @@ class ProfileCompilationInfo {
// For testing purposes.
bool Equals(ProfileCompilationInfo& other);
+  // Exposed for testing purposes.
+ static std::string GetProfileDexFileKey(const std::string& dex_location);
private:
bool AddData(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index a7881ac52f..a092b9f248 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -66,6 +66,7 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
DEBUG_ENABLE_JNI_LOGGING = 1 << 4,
DEBUG_GENERATE_DEBUG_INFO = 1 << 5,
DEBUG_ALWAYS_JIT = 1 << 6,
+ DEBUG_NATIVE_DEBUGGABLE = 1 << 7,
};
Runtime* const runtime = Runtime::Current();
@@ -117,6 +118,11 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
debug_flags &= ~DEBUG_ALWAYS_JIT;
}
+ if ((debug_flags & DEBUG_NATIVE_DEBUGGABLE) != 0) {
+ runtime->AddCompilerOption("--native-debuggable");
+ debug_flags &= ~DEBUG_NATIVE_DEBUGGABLE;
+ }
+
if (debug_flags != 0) {
LOG(ERROR) << StringPrintf("Unknown bits set in debug_flags: %#x", debug_flags);
}
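The new DEBUG_NATIVE_DEBUGGABLE handler follows the same shape as the surrounding flag handlers: act on the bit, then clear it so the final "Unknown bits set in debug_flags" check stays quiet. Schematically (the constant is bit 7, i.e. 0x80, as declared above):

    if ((debug_flags & DEBUG_NATIVE_DEBUGGABLE) != 0) {
      runtime->AddCompilerOption("--native-debuggable");  // recorded in the runtime's compiler options
      debug_flags &= ~DEBUG_NATIVE_DEBUGGABLE;            // consume the bit
    }
    // Any bit still set after all handlers is reported via LOG(ERROR).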
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index e76e443cf7..18cf81aa7c 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -387,12 +387,16 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
runtime->GetHeap()->AddSpace(image_space.get());
}
added_image_space = true;
- if (!runtime->GetClassLinker()->AddImageSpace(image_space.get(),
- h_loader,
- dex_elements,
- dex_location,
- /*out*/&dex_files,
- /*out*/&temp_error_msg)) {
+ if (runtime->GetClassLinker()->AddImageSpace(image_space.get(),
+ h_loader,
+ dex_elements,
+ dex_location,
+ /*out*/&dex_files,
+ /*out*/&temp_error_msg)) {
+ // Successfully added image space to heap, release the map so that it does not get
+ // freed.
+ image_space.release();
+ } else {
LOG(INFO) << "Failed to add image file " << temp_error_msg;
dex_files.clear();
{
@@ -406,7 +410,6 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
added_image_space = false;
// Non-fatal, don't update error_msg.
}
- image_space.release();
}
}
}
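The oat_file_manager.cc hunk is an ownership fix: image_space.release() used to run unconditionally, so when AddImageSpace() failed the space and its underlying map were leaked; now the unique_ptr is released only once the heap has actually taken the space. The idiom, reduced to a self-contained sketch with hypothetical stand-in types:

    #include <memory>

    struct MappedSpace { ~MappedSpace() { /* unmaps the backing memory */ } };
    bool AddToHeap(MappedSpace*) { return true; }  // stand-in for ClassLinker::AddImageSpace()

    void Open() {
      std::unique_ptr<MappedSpace> space(new MappedSpace);
      if (AddToHeap(space.get())) {
        space.release();   // success: the heap keeps using the space, so do not free its map here
      } else {
        // failure: no release(), the unique_ptr destroys the space and the map is unmapped
      }
    }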
diff --git a/runtime/openjdkjvm/Android.mk b/runtime/openjdkjvm/Android.mk
new file mode 100644
index 0000000000..9b7404ebf5
--- /dev/null
+++ b/runtime/openjdkjvm/Android.mk
@@ -0,0 +1,20 @@
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := openjdkjvm-phony
+include $(BUILD_PHONY_PACKAGE)
diff --git a/runtime/openjdkjvm/MODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION b/runtime/openjdkjvm/MODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/runtime/openjdkjvm/MODULE_LICENSE_GPL_WITH_CLASSPATH_EXCEPTION
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 2ea4b1453d..f9d916a92e 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -146,6 +146,10 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define({"-XX:EnableHSpaceCompactForOOM", "-XX:DisableHSpaceCompactForOOM"})
.WithValues({true, false})
.IntoKey(M::EnableHSpaceCompactForOOM)
+ .Define("-XX:DumpNativeStackOnSigQuit:_")
+ .WithType<bool>()
+ .WithValueMap({{"false", false}, {"true", true}})
+ .IntoKey(M::DumpNativeStackOnSigQuit)
.Define("-Xusejit:_")
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
@@ -667,6 +671,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -XX:BackgroundGC=none\n");
UsageMessage(stream, " -XX:LargeObjectSpace={disabled,map,freelist}\n");
UsageMessage(stream, " -XX:LargeObjectThreshold=N\n");
+ UsageMessage(stream, " -XX:DumpNativeStackOnSigQuit=booleanvalue\n");
UsageMessage(stream, " -Xmethod-trace\n");
UsageMessage(stream, " -Xmethod-trace-file:filename");
UsageMessage(stream, " -Xmethod-trace-file-size:integervalue\n");
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 6b84c8faa2..9b10f2e0b8 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -22,6 +22,7 @@
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
+#include "dex_instruction_utils.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "verifier/method_verifier-inl.h"
@@ -33,6 +34,366 @@
namespace art {
+namespace { // anonymous namespace
+
+// Helper class for matching a pattern.
+class Matcher {
+ public:
+ // Match function type.
+ typedef bool MatchFn(Matcher* matcher);
+
+ template <size_t size>
+ static bool Match(const DexFile::CodeItem* code_item, MatchFn* const (&pattern)[size]);
+
+ // Match and advance.
+
+ static bool Mark(Matcher* matcher);
+
+ template <bool (Matcher::*Fn)()>
+ static bool Required(Matcher* matcher);
+
+ template <bool (Matcher::*Fn)()>
+ static bool Repeated(Matcher* matcher); // On match, returns to the mark.
+
+ // Match an individual instruction.
+
+ template <Instruction::Code opcode> bool Opcode();
+ bool Const0();
+ bool IPutOnThis();
+
+ private:
+ explicit Matcher(const DexFile::CodeItem* code_item)
+ : code_item_(code_item),
+ instruction_(Instruction::At(code_item->insns_)),
+ pos_(0u),
+ mark_(0u) { }
+
+ static bool DoMatch(const DexFile::CodeItem* code_item, MatchFn* const* pattern, size_t size);
+
+ const DexFile::CodeItem* const code_item_;
+ const Instruction* instruction_;
+ size_t pos_;
+ size_t mark_;
+};
+
+template <size_t size>
+bool Matcher::Match(const DexFile::CodeItem* code_item, MatchFn* const (&pattern)[size]) {
+ return DoMatch(code_item, pattern, size);
+}
+
+bool Matcher::Mark(Matcher* matcher) {
+ matcher->pos_ += 1u; // Advance to the next match function before marking.
+ matcher->mark_ = matcher->pos_;
+ return true;
+}
+
+template <bool (Matcher::*Fn)()>
+bool Matcher::Required(Matcher* matcher) {
+ if (!(matcher->*Fn)()) {
+ return false;
+ }
+ matcher->pos_ += 1u;
+ matcher->instruction_ = matcher->instruction_->Next();
+ return true;
+}
+
+template <bool (Matcher::*Fn)()>
+bool Matcher::Repeated(Matcher* matcher) {
+ if (!(matcher->*Fn)()) {
+ // Didn't match optional instruction, try the next match function.
+ matcher->pos_ += 1u;
+ return true;
+ }
+ matcher->pos_ = matcher->mark_;
+ matcher->instruction_ = matcher->instruction_->Next();
+ return true;
+}
+
+template <Instruction::Code opcode>
+bool Matcher::Opcode() {
+ return instruction_->Opcode() == opcode;
+}
+
+// Match const 0.
+bool Matcher::Const0() {
+ return IsInstructionDirectConst(instruction_->Opcode()) &&
+ (instruction_->Opcode() == Instruction::CONST_WIDE ? instruction_->VRegB_51l() == 0
+ : instruction_->VRegB() == 0);
+}
+
+bool Matcher::IPutOnThis() {
+ DCHECK_NE(code_item_->ins_size_, 0u);
+ return IsInstructionIPut(instruction_->Opcode()) &&
+ instruction_->VRegB_22c() == code_item_->registers_size_ - code_item_->ins_size_;
+}
+
+bool Matcher::DoMatch(const DexFile::CodeItem* code_item, MatchFn* const* pattern, size_t size) {
+ Matcher matcher(code_item);
+ while (matcher.pos_ != size) {
+ if (!pattern[matcher.pos_](&matcher)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Used for a single invoke in a constructor. In that situation, the method verifier makes
+// sure we invoke a constructor either in the same class or superclass with at least "this".
+ArtMethod* GetTargetConstructor(ArtMethod* method, const Instruction* invoke_direct)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT);
+ DCHECK_EQ(invoke_direct->VRegC_35c(),
+ method->GetCodeItem()->registers_size_ - method->GetCodeItem()->ins_size_);
+ uint32_t method_index = invoke_direct->VRegB_35c();
+ size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ ArtMethod* target_method =
+ method->GetDexCache()->GetResolvedMethod(method_index, pointer_size);
+ if (kIsDebugBuild && target_method != nullptr) {
+ CHECK(!target_method->IsStatic());
+ CHECK(target_method->IsConstructor());
+ CHECK(target_method->GetDeclaringClass() == method->GetDeclaringClass() ||
+ target_method->GetDeclaringClass() == method->GetDeclaringClass()->GetSuperClass());
+ }
+ return target_method;
+}
+
+// Return the forwarded arguments and check that all remaining arguments are zero.
+// If the check fails, return static_cast<size_t>(-1).
+size_t CountForwardedConstructorArguments(const DexFile::CodeItem* code_item,
+ const Instruction* invoke_direct,
+ uint16_t zero_vreg_mask) {
+ DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT);
+ size_t number_of_args = invoke_direct->VRegA_35c();
+ DCHECK_NE(number_of_args, 0u);
+ uint32_t args[Instruction::kMaxVarArgRegs];
+ invoke_direct->GetVarArgs(args);
+ uint16_t this_vreg = args[0];
+ DCHECK_EQ(this_vreg, code_item->registers_size_ - code_item->ins_size_); // Checked by verifier.
+ size_t forwarded = 1u;
+ while (forwarded < number_of_args &&
+ args[forwarded] == this_vreg + forwarded &&
+ (zero_vreg_mask & (1u << args[forwarded])) == 0) {
+ ++forwarded;
+ }
+ for (size_t i = forwarded; i != number_of_args; ++i) {
+ if ((zero_vreg_mask & (1u << args[i])) == 0) {
+ return static_cast<size_t>(-1);
+ }
+ }
+ return forwarded;
+}
+
+uint16_t GetZeroVRegMask(const Instruction* const0) {
+ DCHECK(IsInstructionDirectConst(const0->Opcode()));
+ DCHECK((const0->Opcode() == Instruction::CONST_WIDE) ? const0->VRegB_51l() == 0u
+ : const0->VRegB() == 0);
+ uint16_t base_mask = IsInstructionConstWide(const0->Opcode()) ? 3u : 1u;
+ return base_mask << const0->VRegA();
+}
+
+// We limit the number of IPUTs storing parameters. There can be any number
+// of IPUTs that store the value 0 as they are useless in a constructor as
+// the object always starts zero-initialized. We also eliminate all but the
+// last store to any field as they are not observable; not even if the field
+// is volatile as no reference to the object can escape from a constructor
+// with this pattern.
+static constexpr size_t kMaxConstructorIPuts = 3u;
+
+struct ConstructorIPutData {
+ ConstructorIPutData() : field_index(DexFile::kDexNoIndex16), arg(0u) { }
+
+ uint16_t field_index;
+ uint16_t arg;
+};
+
+bool RecordConstructorIPut(ArtMethod* method,
+ const Instruction* new_iput,
+ uint16_t this_vreg,
+ uint16_t zero_vreg_mask,
+ /*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsInstructionIPut(new_iput->Opcode()));
+ uint32_t field_index = new_iput->VRegC_22c();
+ size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ mirror::DexCache* dex_cache = method->GetDexCache();
+ ArtField* field = dex_cache->GetResolvedField(field_index, pointer_size);
+ if (UNLIKELY(field == nullptr)) {
+ return false;
+ }
+ // Remove previous IPUT to the same field, if any. Different field indexes may refer
+ // to the same field, so we need to compare resolved fields from the dex cache.
+ for (size_t old_pos = 0; old_pos != arraysize(iputs); ++old_pos) {
+ if (iputs[old_pos].field_index == DexFile::kDexNoIndex16) {
+ break;
+ }
+ ArtField* f = dex_cache->GetResolvedField(iputs[old_pos].field_index, pointer_size);
+ DCHECK(f != nullptr);
+ if (f == field) {
+ auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos);
+ *back_it = ConstructorIPutData();
+ break;
+ }
+ }
+ // If the stored value isn't zero, record the IPUT.
+ if ((zero_vreg_mask & (1u << new_iput->VRegA_22c())) == 0u) {
+ size_t new_pos = 0;
+ while (new_pos != arraysize(iputs) && iputs[new_pos].field_index != DexFile::kDexNoIndex16) {
+ ++new_pos;
+ }
+ if (new_pos == arraysize(iputs)) {
+ return false; // Exceeded capacity of the output array.
+ }
+ iputs[new_pos].field_index = field_index;
+ iputs[new_pos].arg = new_iput->VRegA_22c() - this_vreg;
+ }
+ return true;
+}
+
+bool DoAnalyseConstructor(const DexFile::CodeItem* code_item,
+ ArtMethod* method,
+ /*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // On entry we should not have any IPUTs yet.
+ DCHECK_EQ(0, std::count_if(
+ iputs,
+ iputs + arraysize(iputs),
+ [](const ConstructorIPutData& iput_data) {
+ return iput_data.field_index != DexFile::kDexNoIndex16;
+ }));
+
+ // Limit the maximum number of code units we're willing to match.
+ static constexpr size_t kMaxCodeUnits = 16u;
+
+ // Limit the number of registers that the constructor may use to 16.
+ // Given that IPUTs must use low 16 registers and we do not match MOVEs,
+ // this is a reasonable limitation.
+ static constexpr size_t kMaxVRegs = 16u;
+
+ // We try to match a constructor that calls another constructor (either in
+ // superclass or in the same class) with the same parameters, or with some
+ // parameters truncated (allowed only for calls to superclass constructor)
+ // or with extra parameters with value 0 (with any type, including null).
+ // This call can be followed by optional IPUTs on "this" storing either one
+ // of the parameters or 0 and the code must then finish with RETURN_VOID.
+ // The called constructor must be either java.lang.Object.<init>() or it
+ // must also match the same pattern.
+ static Matcher::MatchFn* const kConstructorPattern[] = {
+ &Matcher::Mark,
+ &Matcher::Repeated<&Matcher::Const0>,
+ &Matcher::Required<&Matcher::Opcode<Instruction::INVOKE_DIRECT>>,
+ &Matcher::Mark,
+ &Matcher::Repeated<&Matcher::Const0>,
+ &Matcher::Repeated<&Matcher::IPutOnThis>,
+ &Matcher::Required<&Matcher::Opcode<Instruction::RETURN_VOID>>,
+ };
+
+ DCHECK(method != nullptr);
+ DCHECK(!method->IsStatic());
+ DCHECK(method->IsConstructor());
+ DCHECK(code_item != nullptr);
+ if (!method->GetDeclaringClass()->IsVerified() ||
+ code_item->insns_size_in_code_units_ > kMaxCodeUnits ||
+ code_item->registers_size_ > kMaxVRegs ||
+ !Matcher::Match(code_item, kConstructorPattern)) {
+ return false;
+ }
+
+ // Verify the invoke, prevent a few odd cases and collect IPUTs.
+ uint16_t this_vreg = code_item->registers_size_ - code_item->ins_size_;
+ uint16_t zero_vreg_mask = 0u;
+ for (const Instruction* instruction = Instruction::At(code_item->insns_);
+ instruction->Opcode() != Instruction::RETURN_VOID;
+ instruction = instruction->Next()) {
+ if (instruction->Opcode() == Instruction::INVOKE_DIRECT) {
+ ArtMethod* target_method = GetTargetConstructor(method, instruction);
+ if (target_method == nullptr) {
+ return false;
+ }
+ // To prevent infinite recursion, we allow a forwarding constructor in the same
+ // class only if it passes more arguments than this constructor receives.
+ if (target_method->GetDeclaringClass() == method->GetDeclaringClass() &&
+ instruction->VRegA_35c() <= code_item->ins_size_) {
+ return false;
+ }
+ size_t forwarded = CountForwardedConstructorArguments(code_item, instruction, zero_vreg_mask);
+ if (forwarded == static_cast<size_t>(-1)) {
+ return false;
+ }
+ if (target_method->GetDeclaringClass()->IsObjectClass()) {
+ DCHECK_EQ(Instruction::At(target_method->GetCodeItem()->insns_)->Opcode(),
+ Instruction::RETURN_VOID);
+ } else {
+ const DexFile::CodeItem* target_code_item = target_method->GetCodeItem();
+ if (target_code_item == nullptr) {
+ return false; // Native constructor?
+ }
+ if (!DoAnalyseConstructor(target_code_item, target_method, iputs)) {
+ return false;
+ }
+ // Prune IPUTs with zero input.
+ auto kept_end = std::remove_if(
+ iputs,
+ iputs + arraysize(iputs),
+ [forwarded](const ConstructorIPutData& iput_data) {
+ return iput_data.arg >= forwarded;
+ });
+ std::fill(kept_end, iputs + arraysize(iputs), ConstructorIPutData());
+ // If we have any IPUTs from the call, check that the target method is in the same
+ // dex file (compare DexCache references), otherwise field_indexes would be bogus.
+ if (iputs[0].field_index != DexFile::kDexNoIndex16 &&
+ target_method->GetDexCache() != method->GetDexCache()) {
+ return false;
+ }
+ }
+ } else if (IsInstructionDirectConst(instruction->Opcode())) {
+ zero_vreg_mask |= GetZeroVRegMask(instruction);
+ if ((zero_vreg_mask & (1u << this_vreg)) != 0u) {
+ return false; // Overwriting `this` is unsupported.
+ }
+ } else {
+ DCHECK(IsInstructionIPut(instruction->Opcode()));
+ DCHECK_EQ(instruction->VRegB_22c(), this_vreg);
+ if (!RecordConstructorIPut(method, instruction, this_vreg, zero_vreg_mask, iputs)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+} // anonymous namespace
+
+bool AnalyseConstructor(const DexFile::CodeItem* code_item,
+ ArtMethod* method,
+ InlineMethod* result)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ConstructorIPutData iputs[kMaxConstructorIPuts];
+ if (!DoAnalyseConstructor(code_item, method, iputs)) {
+ return false;
+ }
+ static_assert(kMaxConstructorIPuts == 3, "Unexpected limit"); // Code below depends on this.
+ DCHECK(iputs[0].field_index != DexFile::kDexNoIndex16 ||
+ iputs[1].field_index == DexFile::kDexNoIndex16);
+ DCHECK(iputs[1].field_index != DexFile::kDexNoIndex16 ||
+ iputs[2].field_index == DexFile::kDexNoIndex16);
+
+#define STORE_IPUT(n) \
+ do { \
+ result->d.constructor_data.iput##n##_field_index = iputs[n].field_index; \
+ result->d.constructor_data.iput##n##_arg = iputs[n].arg; \
+ } while (false)
+
+ STORE_IPUT(0);
+ STORE_IPUT(1);
+ STORE_IPUT(2);
+#undef STORE_IPUT
+
+ result->opcode = kInlineOpConstructor;
+ result->flags = kInlineSpecial;
+ result->d.constructor_data.reserved = 0u;
+ return true;
+}
+
static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET), "iget type");
static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_WIDE), "iget_wide type");
static_assert(InlineMethodAnalyser::IsInstructionIGet(Instruction::IGET_OBJECT),
@@ -123,7 +484,19 @@ bool InlineMethodAnalyser::AnalyseMethodCode(const DexFile::CodeItem* code_item,
case Instruction::CONST_16:
case Instruction::CONST_HIGH16:
// TODO: Support wide constants (RETURN_WIDE).
- return AnalyseConstMethod(code_item, result);
+ if (AnalyseConstMethod(code_item, result)) {
+ return true;
+ }
+ FALLTHROUGH_INTENDED;
+ case Instruction::CONST_WIDE:
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ case Instruction::CONST_WIDE_HIGH16:
+ case Instruction::INVOKE_DIRECT:
+ if (method != nullptr && !method->IsStatic() && method->IsConstructor()) {
+ return AnalyseConstructor(code_item, method, result);
+ }
+ return false;
case Instruction::IGET:
case Instruction::IGET_OBJECT:
case Instruction::IGET_BOOLEAN:
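For intuition, the pattern above (kConstructorPattern) accepts trivial constructors that only forward to another constructor and then store incoming parameters, or zero/null constants, into fields of "this". As an illustration only (class and field names are made up, not taken from this patch), a Java constructor of roughly this shape should compile to dex code that matches:

// Illustrative sketch: a constructor shape that DoAnalyseConstructor is designed to accept.
// Compiled dex: invoke-direct {this}, Object.<init>(), then up to three IPUTs on
// "this" storing parameters or the constant 0/null, then return-void.
final class Point {
  int x;
  int y;
  Object tag;

  Point(int x, int y) {
    this.x = x;       // recorded: IPUT with arg == 1 (register offset of x from "this")
    this.y = y;       // recorded: IPUT with arg == 2
    this.tag = null;  // stores a zero/null constant: masked out via zero_vreg_mask, not recorded
  }
}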
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 046d2257f4..0b09a70be4 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -107,6 +107,7 @@ enum InlineMethodOpcode : uint16_t {
kInlineOpNonWideConst,
kInlineOpIGet,
kInlineOpIPut,
+ kInlineOpConstructor,
kInlineStringInit,
};
std::ostream& operator<<(std::ostream& os, const InlineMethodOpcode& rhs);
@@ -168,6 +169,19 @@ struct InlineReturnArgData {
static_assert(sizeof(InlineReturnArgData) == sizeof(uint64_t),
"Invalid size of InlineReturnArgData");
+struct InlineConstructorData {
+ // There can be up to 3 IPUTs; unused entries are marked with DexFile::kDexNoIndex16.
+ uint16_t iput0_field_index;
+ uint16_t iput1_field_index;
+ uint16_t iput2_field_index;
+ uint16_t iput0_arg : 4;
+ uint16_t iput1_arg : 4;
+ uint16_t iput2_arg : 4;
+ uint16_t reserved : 4;
+};
+static_assert(sizeof(InlineConstructorData) == sizeof(uint64_t),
+ "Invalid size of InlineConstructorData");
+
struct InlineMethod {
InlineMethodOpcode opcode;
InlineMethodFlags flags;
@@ -175,6 +189,7 @@ struct InlineMethod {
uint64_t data;
InlineIGetIPutData ifield_data;
InlineReturnArgData return_data;
+ InlineConstructorData constructor_data;
} d;
};
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1b59c6fde0..2aeb7921ce 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -335,6 +335,7 @@ struct AbortState {
os << "Runtime aborting...\n";
if (Runtime::Current() == nullptr) {
os << "(Runtime does not yet exist!)\n";
+ DumpNativeStack(os, GetTid(), nullptr, " native: ", nullptr);
return;
}
Thread* self = Thread::Current();
@@ -917,6 +918,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::Dex2Oat);
image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
+ dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);
vfprintf_ = runtime_options.GetOrDefault(Opt::HookVfprintf);
exit_ = runtime_options.GetOrDefault(Opt::HookExit);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index bec26f8eaa..1956bae52a 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -603,6 +603,10 @@ class Runtime {
safe_mode_ = mode;
}
+ bool GetDumpNativeStackOnSigQuit() const {
+ return dump_native_stack_on_sig_quit_;
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -813,6 +817,9 @@ class Runtime {
// Whether the application should run in safe mode, that is, interpreter only.
bool safe_mode_;
+ // Whether threads should dump their native stack on SIGQUIT.
+ bool dump_native_stack_on_sig_quit_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 097bccb372..838d1a9649 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -67,6 +67,7 @@ RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier))
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
+RUNTIME_OPTIONS_KEY (bool, DumpNativeStackOnSigQuit, true)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold, jit::Jit::kDefaultWarmupThreshold)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7a455949e6..2ee160571e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -936,9 +936,9 @@ void Thread::ShortDump(std::ostream& os) const {
<< "]";
}
-void Thread::Dump(std::ostream& os, BacktraceMap* backtrace_map) const {
+void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map) const {
DumpState(os);
- DumpStack(os, backtrace_map);
+ DumpStack(os, dump_native_stack, backtrace_map);
}
mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
@@ -1497,7 +1497,9 @@ void Thread::DumpJavaStack(std::ostream& os) const {
}
}
-void Thread::DumpStack(std::ostream& os, BacktraceMap* backtrace_map) const {
+void Thread::DumpStack(std::ostream& os,
+ bool dump_native_stack,
+ BacktraceMap* backtrace_map) const {
// TODO: we call this code when dying but may not have suspended the thread ourself. The
// IsSuspended check is therefore racy with the use for dumping (normally we inhibit
// the race with the thread_suspend_count_lock_).
@@ -1510,7 +1512,7 @@ void Thread::DumpStack(std::ostream& os, BacktraceMap* backtrace_map) const {
}
if (safe_to_dump) {
// If we're currently in native code, dump that stack before dumping the managed stack.
- if (dump_for_abort || ShouldShowNativeStack(this)) {
+ if (dump_native_stack && (dump_for_abort || ShouldShowNativeStack(this))) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
diff --git a/runtime/thread.h b/runtime/thread.h
index 3a5d72efcb..2726e91130 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -187,7 +187,9 @@ class Thread {
void ShortDump(std::ostream& os) const;
// Dumps the detailed thread state and the thread stack (used for SIGQUIT).
- void Dump(std::ostream& os, BacktraceMap* backtrace_map = nullptr) const
+ void Dump(std::ostream& os,
+ bool dump_native_stack = true,
+ BacktraceMap* backtrace_map = nullptr) const
REQUIRES(!Locks::thread_suspend_count_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1111,7 +1113,9 @@ class Thread {
void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);
void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
- void DumpStack(std::ostream& os, BacktraceMap* backtrace_map = nullptr) const
+ void DumpStack(std::ostream& os,
+ bool dump_native_stack = true,
+ BacktraceMap* backtrace_map = nullptr) const
REQUIRES(!Locks::thread_suspend_count_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index c8714a60a8..49d54fda00 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -140,7 +140,7 @@ void ThreadList::DumpForSigQuit(std::ostream& os) {
suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data); // Dump time to suspend.
}
}
- Dump(os);
+ Dump(os, Runtime::Current()->GetDumpNativeStackOnSigQuit());
DumpUnattachedThreads(os);
}
@@ -189,8 +189,11 @@ static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 10000 : 20000;
// A closure used by Thread::Dump.
class DumpCheckpoint FINAL : public Closure {
public:
- explicit DumpCheckpoint(std::ostream* os)
- : os_(os), barrier_(0), backtrace_map_(BacktraceMap::Create(getpid())) {}
+ DumpCheckpoint(std::ostream* os, bool dump_native_stack)
+ : os_(os),
+ barrier_(0),
+ backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
+ dump_native_stack_(dump_native_stack) {}
void Run(Thread* thread) OVERRIDE {
// Note thread and self may not be equal if thread was already suspended at the point of the
@@ -199,7 +202,7 @@ class DumpCheckpoint FINAL : public Closure {
std::ostringstream local_os;
{
ScopedObjectAccess soa(self);
- thread->Dump(local_os, backtrace_map_.get());
+ thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
}
local_os << "\n";
{
@@ -228,14 +231,16 @@ class DumpCheckpoint FINAL : public Closure {
Barrier barrier_;
// A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
std::unique_ptr<BacktraceMap> backtrace_map_;
+ // Whether we should dump the native stack.
+ const bool dump_native_stack_;
};
-void ThreadList::Dump(std::ostream& os) {
+void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
{
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
os << "DALVIK THREADS (" << list_.size() << "):\n";
}
- DumpCheckpoint checkpoint(&os);
+ DumpCheckpoint checkpoint(&os, dump_native_stack);
size_t threads_running_checkpoint = RunCheckpoint(&checkpoint);
if (threads_running_checkpoint != 0) {
checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 2e73f6af7f..363cab83f4 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -49,7 +49,7 @@ class ThreadList {
void DumpForSigQuit(std::ostream& os)
REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_);
// For thread suspend timeout dumps.
- void Dump(std::ostream& os)
+ void Dump(std::ostream& os, bool dump_native_stack = true)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
pid_t GetLockOwner(); // For SignalCatcher.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index c7ac172820..1d31408cf0 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2411,6 +2411,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
if (res_type.IsUninitializedTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "thrown exception not initialized";
+ } else if (!res_type.IsReferenceTypes()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "thrown value of non-reference type " << res_type;
} else {
Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
<< "thrown class " << res_type << " not instanceof Throwable";
@@ -4524,6 +4526,19 @@ void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType&
if (UNLIKELY(have_pending_hard_failure_)) {
return;
}
+ if (should_adjust) {
+ if (field == nullptr) {
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Might be accessing a superclass instance field prior "
+ << "to the superclass being initialized in "
+ << PrettyMethod(dex_method_idx_, *dex_file_);
+ } else if (field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "cannot access superclass instance field "
+ << PrettyField(field) << " of a not fully initialized "
+ << "object within the context of "
+ << PrettyMethod(dex_method_idx_, *dex_file_);
+ return;
+ }
+ }
}
const RegType* field_type = nullptr;
if (field != nullptr) {
diff --git a/test/127-checker-secondarydex/src/Test.java b/test/127-checker-secondarydex/src/Test.java
index 266ed191bc..438e8545ff 100644
--- a/test/127-checker-secondarydex/src/Test.java
+++ b/test/127-checker-secondarydex/src/Test.java
@@ -23,7 +23,7 @@ public class Test extends Super {
System.out.println("Test");
}
- /// CHECK-START: java.lang.Integer Test.toInteger() ssa_builder (after)
+ /// CHECK-START: java.lang.Integer Test.toInteger() builder (after)
/// CHECK: LoadClass needs_access_check:false klass:java.lang.Integer
public Integer toInteger() {
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 87656bcf0e..45251b8f7d 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -53,6 +53,7 @@ static void CauseSegfault() {
extern "C" JNIEXPORT jboolean JNICALL Java_Main_sleep(JNIEnv*, jobject, jint, jboolean, jdouble) {
// Keep pausing.
+ printf("Going to sleep\n");
for (;;) {
pause();
}
diff --git a/test/137-cfi/expected.txt b/test/137-cfi/expected.txt
index 8db7853696..6a5618ebc6 100644
--- a/test/137-cfi/expected.txt
+++ b/test/137-cfi/expected.txt
@@ -1,2 +1 @@
JNI_OnLoad called
-JNI_OnLoad called
diff --git a/test/137-cfi/run b/test/137-cfi/run
index 6f4bcfe658..8ec98c11dc 100755
--- a/test/137-cfi/run
+++ b/test/137-cfi/run
@@ -20,4 +20,5 @@ ${RUN} "$@" -Xcompiler-option --generate-debug-info --args --full-signatures
# Test with minimal compressed debugging information.
# Check only method names (parameters are omitted to save space).
-${RUN} "$@" -Xcompiler-option --generate-mini-debug-info
+# Temporarily disable due to bug 27172087 (leak/race in libunwind).
+# ${RUN} "$@" -Xcompiler-option --generate-mini-debug-info
diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java
index 77553380c8..d60a4ebba8 100644
--- a/test/137-cfi/src/Main.java
+++ b/test/137-cfi/src/Main.java
@@ -16,10 +16,7 @@
import java.io.BufferedReader;
import java.io.FileReader;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
+import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.Comparator;
@@ -98,9 +95,12 @@ public class Main implements Comparator<Main> {
throw new RuntimeException("Couldn't parse process");
}
- // Wait a bit, so the forked process has time to run until its sleep phase.
+ // Wait until the forked process has had time to reach its sleep phase.
try {
- Thread.sleep(5000);
+ InputStreamReader stdout = new InputStreamReader(p.getInputStream(), "UTF-8");
+ BufferedReader lineReader = new BufferedReader(stdout);
+ while (!lineReader.readLine().contains("Going to sleep")) {
+ }
} catch (Exception e) {
throw new RuntimeException(e);
}
diff --git a/test/444-checker-nce/src/Main.java b/test/444-checker-nce/src/Main.java
index 865355ce97..c96b18c71b 100644
--- a/test/444-checker-nce/src/Main.java
+++ b/test/444-checker-nce/src/Main.java
@@ -27,7 +27,7 @@ public class Main {
return m.g();
}
- /// CHECK-START: Main Main.thisTest() ssa_builder (after)
+ /// CHECK-START: Main Main.thisTest() builder (after)
/// CHECK: NullCheck
/// CHECK: InvokeStaticOrDirect
@@ -38,7 +38,7 @@ public class Main {
return g();
}
- /// CHECK-START: Main Main.newInstanceRemoveTest() ssa_builder (after)
+ /// CHECK-START: Main Main.newInstanceRemoveTest() builder (after)
/// CHECK: NewInstance
/// CHECK: NullCheck
/// CHECK: InvokeStaticOrDirect
@@ -52,7 +52,7 @@ public class Main {
return m.g();
}
- /// CHECK-START: Main Main.newArrayRemoveTest() ssa_builder (after)
+ /// CHECK-START: Main Main.newArrayRemoveTest() builder (after)
/// CHECK: NewArray
/// CHECK: NullCheck
/// CHECK: ArrayGet
@@ -178,7 +178,7 @@ public class Main {
return n.g();
}
- /// CHECK-START: Main Main.scopeRemoveTest(int, Main) ssa_builder (after)
+ /// CHECK-START: Main Main.scopeRemoveTest(int, Main) builder (after)
/// CHECK: NullCheck
/// CHECK-START: Main Main.scopeRemoveTest(int, Main) instruction_simplifier (after)
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 8f9a32ab3a..31bb94cb8c 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -122,8 +122,9 @@ public class Main {
/// CHECK: ArraySet
static void constantIndexing1(int[] array) {
- array[5] = 1;
- array[4] = 1;
+ // Decreasing order: the bounds check for index 5 remains, but the one for 4 is eliminated.
+ array[5] = 11;
+ array[4] = 11;
}
@@ -136,17 +137,18 @@ public class Main {
/// CHECK: ArraySet
/// CHECK: BoundsCheck
/// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
/// CHECK-START: void Main.$opt$noinline$constantIndexing2(int[]) BCE (after)
- /// CHECK: LessThanOrEqual
- /// CHECK: Deoptimize
- /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ /// CHECK: BoundsCheck
/// CHECK: ArraySet
- /// CHECK-NOT: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK: ArraySet
- /// CHECK-NOT: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK: ArraySet
- /// CHECK-NOT: BoundsCheck
+ /// CHECK: BoundsCheck
/// CHECK: ArraySet
/// CHECK: BoundsCheck
/// CHECK: ArraySet
@@ -156,12 +158,39 @@ public class Main {
array[2] = 1;
array[3] = 1;
array[4] = 1;
- array[-1] = 1;
+ array[-1] = 1; // prevents the whole optimization on the range [-1:4]
if (array[1] == 1) {
throw new Error("");
}
}
+ /// CHECK-START: void Main.constantIndexing2b(int[]) BCE (before)
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+
+ /// CHECK-START: void Main.constantIndexing2b(int[]) BCE (after)
+ /// CHECK: Deoptimize
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+
+ static void constantIndexing2b(int[] array) {
+ array[0] = 7;
+ array[1] = 7;
+ array[2] = 7;
+ array[3] = 7;
+ }
/// CHECK-START: int[] Main.constantIndexing3(int[], int[], boolean) BCE (before)
/// CHECK: BoundsCheck
@@ -182,11 +211,9 @@ public class Main {
/// CHECK: ArraySet
/// CHECK-START: int[] Main.constantIndexing3(int[], int[], boolean) BCE (after)
- /// CHECK: LessThanOrEqual
/// CHECK: Deoptimize
/// CHECK-NOT: BoundsCheck
/// CHECK: ArrayGet
- /// CHECK: LessThanOrEqual
/// CHECK: Deoptimize
/// CHECK-NOT: BoundsCheck
/// CHECK: ArraySet
@@ -220,14 +247,14 @@ public class Main {
/// CHECK: ArraySet
/// CHECK-START: void Main.constantIndexing4(int[]) BCE (after)
- /// CHECK-NOT: LessThanOrEqual
+ /// CHECK-NOT: Deoptimize
/// CHECK: BoundsCheck
/// CHECK: ArraySet
// There is only one array access. It's not beneficial
// to create a compare with deoptimization instruction.
static void constantIndexing4(int[] array) {
- array[0] = 1;
+ array[0] = -1;
}
@@ -260,10 +287,221 @@ public class Main {
/// CHECK-START: void Main.constantIndexing6(int[]) BCE (after)
/// CHECK: Deoptimize
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
static void constantIndexing6(int[] array) {
- array[3] = 1;
- array[4] = 1;
+ array[3] = 111;
+ array[4] = 111;
+ }
+
+ /// CHECK-START: void Main.constantIndexing7(int[], int) BCE (before)
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+
+ /// CHECK-START: void Main.constantIndexing7(int[], int) BCE (after)
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+
+ static void constantIndexing7(int[] array, int base) {
+ // With constant offsets from a symbolic base.
+ array[base] = 10;
+ array[base + 1] = 20;
+ array[base + 2] = 30;
+ array[base + 3] = 40;
+ }
+
+ /// CHECK-START: void Main.constantIndexing8(int[], int) BCE (before)
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+
+ /// CHECK-START: void Main.constantIndexing8(int[], int) BCE (after)
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+
+ static void constantIndexing8(int[] array, int base) {
+ // With constant offsets "both ways" to symbolic base.
+ array[base - 1] = 100;
+ array[base] = 200;
+ array[base + 1] = 300;
+ array[base + 2] = 400;
+ }
+
+ /// CHECK-START: void Main.constantIndexing9(int[], int) BCE (before)
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK: BoundsCheck
+ /// CHECK: ArraySet
+
+ /// CHECK-START: void Main.constantIndexing9(int[], int) BCE (after)
+ /// CHECK: Deoptimize
+ /// CHECK: Deoptimize
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK: ArraySet
+ /// CHECK-NOT: BoundsCheck
+
+ static void constantIndexing9(int[] array, int base) {
+ // The final range is base..base+3, so even the conditionally executed
+ // references may end up included in it.
+ array[base] = 0;
+ if (base != 12345)
+ array[base + 2] = 2;
+ array[base + 3] = 3;
+ if (base != 67890)
+ array[base + 1] = 1;
+ }
+
+ static void runAllConstantIndices() {
+ int[] a1 = { 0 };
+ int[] a6 = { 0, 0, 0, 0, 0, 0 };
+
+ boolean caught = false;
+ try {
+ constantIndexing1(a1);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught) {
+ System.out.println("constant indices 1 failed!");
+ }
+
+ constantIndexing1(a6);
+ if (a6[4] != 11 || a6[5] != 11) {
+ System.out.println("constant indices 1 failed!");
+ }
+
+ caught = false;
+ try {
+ $opt$noinline$constantIndexing2(a6);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught || a6[0] != 0 || a6[1] != 1 || a6[2] != 1 ||
+ a6[3] != 1 || a6[4] != 1 || a6[5] != 11) {
+ System.out.println("constant indices 2 failed!");
+ }
+
+ caught = false;
+ try {
+ constantIndexing2b(a1);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught || a1[0] != 7) {
+ System.out.println("constant indices 2b failed!");
+ }
+
+ constantIndexing2b(a6);
+ if (a6[0] != 7 || a6[1] != 7 || a6[2] != 7 ||
+ a6[3] != 7 || a6[4] != 1 || a6[5] != 11) {
+ System.out.println("constant indices 2b failed!");
+ }
+
+ int[] b4 = new int[4];
+ constantIndexing3(a6, b4, true);
+ if (b4[0] != 7 || b4[1] != 7 || b4[2] != 7 || b4[3] != 7) {
+ System.out.println("constant indices 3 failed!");
+ }
+
+ constantIndexing4(a1);
+ if (a1[0] != -1) {
+ System.out.println("constant indices 4 failed!");
+ }
+
+ caught = false;
+ try {
+ constantIndexing5(a6);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught) {
+ System.out.println("constant indices 5 failed!");
+ }
+
+ constantIndexing6(a6);
+ if (a6[0] != 7 || a6[1] != 7 || a6[2] != 7 ||
+ a6[3] != 111 || a6[4] != 111 || a6[5] != 11) {
+ System.out.println("constant indices 6 failed!");
+ }
+
+ constantIndexing7(a6, 1);
+ if (a6[0] != 7 || a6[1] != 10 || a6[2] != 20 ||
+ a6[3] != 30 || a6[4] != 40 || a6[5] != 11) {
+ System.out.println("constant indices 7 failed!");
+ }
+
+ caught = false;
+ try {
+ constantIndexing7(a6, 5);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught || a6[0] != 7 || a6[1] != 10 || a6[2] != 20 ||
+ a6[3] != 30 || a6[4] != 40 || a6[5] != 10) {
+ System.out.println("constant indices 7 failed!");
+ }
+
+ constantIndexing8(a6, 1);
+ if (a6[0] != 100 || a6[1] != 200 || a6[2] != 300 ||
+ a6[3] != 400 || a6[4] != 40 || a6[5] != 10) {
+ System.out.println("constant indices 8 failed!");
+ }
+
+ caught = false;
+ try {
+ constantIndexing8(a6, 0);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ caught = true;
+ }
+ if (!caught || a6[0] != 100) {
+ System.out.println("constant indices 8 failed!");
+ }
+
+ constantIndexing9(a6, 0);
+ if (a6[0] != 0 || a6[1] != 1 || a6[2] != 2 ||
+ a6[3] != 3 || a6[4] != 40 || a6[5] != 10) {
+ System.out.println("constant indices 9 failed!");
+ }
}
// A helper into which the actual throwing function should be inlined.
@@ -1102,6 +1340,9 @@ public class Main {
static void testUnknownBounds() {
boolean caught = false;
+
+ runAllConstantIndices();
+
Main main = new Main();
main.foo1(new int[10], 0, 10, false);
if (main.sum != 10) {
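The new constantIndexing7/8/9 tests above check BCE on constant offsets from a symbolic base, where the expected outcome is a pair of Deoptimize guards and no per-access BoundsCheck. As a rough sketch only (the real transformation happens on the optimizing compiler's HIR, not on Java source), the guarded form of constantIndexing7 behaves approximately like this; deoptimize() is a placeholder, not a real API:

class ConstantIndexing7Sketch {
  static void store(int[] array, int base) {
    // The two Deoptimize guards, combined into a single condition for readability:
    // the whole range [base, base + 3] must be within bounds.
    if (base < 0 || base + 3 >= array.length) {
      deoptimize();
    }
    array[base] = 10;       // no per-access BoundsCheck remains after BCE
    array[base + 1] = 20;
    array[base + 2] = 30;
    array[base + 3] = 40;
  }

  private static void deoptimize() {
    // Placeholder for the Deoptimize HIR instruction; in the real runtime it
    // transfers execution to the interpreter, which performs the normal checks.
    throw new UnsupportedOperationException("deoptimize");
  }
}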
diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java
index d48b30e324..027a9d9487 100644
--- a/test/450-checker-types/src/Main.java
+++ b/test/450-checker-types/src/Main.java
@@ -205,7 +205,7 @@ public class Main {
public static boolean $inline$InstanceofSubclassB(Object o) { return o instanceof SubclassB; }
public static boolean $inline$InstanceofSubclassC(Object o) { return o instanceof SubclassC; }
- /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) ssa_builder (after)
+ /// CHECK-START: void Main.testInstanceOf_NotInlined(java.lang.Object) builder (after)
/// CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
/// CHECK-DAG: <<IOf1:z\d+>> InstanceOf
@@ -229,7 +229,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) ssa_builder (after)
+ /// CHECK-START: void Main.testNotInstanceOf_NotInlined(java.lang.Object) builder (after)
/// CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
/// CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
/// CHECK-DAG: <<IOf1:z\d+>> InstanceOf
@@ -487,7 +487,7 @@ public class Main {
((SubclassA)a[0]).$noinline$g();
}
- /// CHECK-START: int Main.testLoadExceptionInCatchNonExact(int, int) ssa_builder (after)
+ /// CHECK-START: int Main.testLoadExceptionInCatchNonExact(int, int) builder (after)
/// CHECK: LoadException klass:java.lang.ArithmeticException can_be_null:false exact:false
public int testLoadExceptionInCatchNonExact(int x, int y) {
try {
@@ -497,7 +497,7 @@ public class Main {
}
}
- /// CHECK-START: int Main.testLoadExceptionInCatchExact(int) ssa_builder (after)
+ /// CHECK-START: int Main.testLoadExceptionInCatchExact(int) builder (after)
/// CHECK: LoadException klass:FinalException can_be_null:false exact:true
public int testLoadExceptionInCatchExact(int x) {
try {
@@ -511,7 +511,7 @@ public class Main {
}
}
- /// CHECK-START: int Main.testLoadExceptionInCatchAll(int, int) ssa_builder (after)
+ /// CHECK-START: int Main.testLoadExceptionInCatchAll(int, int) builder (after)
/// CHECK: LoadException klass:java.lang.Throwable can_be_null:false exact:false
public int testLoadExceptionInCatchAll(int x, int y) {
try {
@@ -532,7 +532,7 @@ public class Main {
return genericFinal.get();
}
- /// CHECK-START: SubclassC Main.inlineGenerics() ssa_builder (after)
+ /// CHECK-START: SubclassC Main.inlineGenerics() builder (after)
/// CHECK: <<Invoke:l\d+>> InvokeStaticOrDirect klass:SubclassC exact:false
/// CHECK-NEXT: Return [<<Invoke>>]
@@ -544,7 +544,7 @@ public class Main {
return c;
}
- /// CHECK-START: Final Main.inlineGenericsFinal() ssa_builder (after)
+ /// CHECK-START: Final Main.inlineGenericsFinal() builder (after)
/// CHECK: <<Invoke:l\d+>> InvokeStaticOrDirect klass:Final exact:true
/// CHECK-NEXT: Return [<<Invoke>>]
@@ -586,7 +586,7 @@ public class Main {
return new SubclassA();
}
- /// CHECK-START: void Main.updateNodesInTheSameBlockAsPhi(boolean) ssa_builder (after)
+ /// CHECK-START: void Main.updateNodesInTheSameBlockAsPhi(boolean) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:Super
/// CHECK: NullCheck [<<Phi>>] klass:Super
@@ -620,7 +620,7 @@ public class Main {
}
- /// CHECK-START: void Main.argumentCheck(Super, double, SubclassA, Final) ssa_builder (after)
+ /// CHECK-START: void Main.argumentCheck(Super, double, SubclassA, Final) builder (after)
/// CHECK: ParameterValue klass:Main can_be_null:false exact:false
/// CHECK: ParameterValue klass:Super can_be_null:true exact:false
/// CHECK: ParameterValue
@@ -636,7 +636,7 @@ public class Main {
private int mainField = 0;
- /// CHECK-START: SuperInterface Main.getWiderType(boolean, Interface, OtherInterface) ssa_builder (after)
+ /// CHECK-START: SuperInterface Main.getWiderType(boolean, Interface, OtherInterface) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private SuperInterface getWiderType(boolean cond, Interface a, OtherInterface b) {
@@ -692,7 +692,7 @@ public class Main {
getSuper();
}
- /// CHECK-START: void Main.testLoopPhiWithNullFirstInput(boolean) ssa_builder (after)
+ /// CHECK-START: void Main.testLoopPhiWithNullFirstInput(boolean) builder (after)
/// CHECK-DAG: <<Null:l\d+>> NullConstant
/// CHECK-DAG: <<Main:l\d+>> NewInstance klass:Main exact:true
/// CHECK-DAG: <<LoopPhi:l\d+>> Phi [<<Null>>,<<LoopPhi>>,<<Main>>] klass:Main exact:true
@@ -705,7 +705,7 @@ public class Main {
}
}
- /// CHECK-START: java.lang.Object[] Main.testInstructionsWithUntypedParent() ssa_builder (after)
+ /// CHECK-START: java.lang.Object[] Main.testInstructionsWithUntypedParent() builder (after)
/// CHECK-DAG: <<Null:l\d+>> NullConstant
/// CHECK-DAG: <<LoopPhi:l\d+>> Phi [<<Null>>,<<Phi:l\d+>>] klass:java.lang.Object[] exact:true
/// CHECK-DAG: <<Array:l\d+>> NewArray klass:java.lang.Object[] exact:true
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index 2f80470cb3..8d6bb653f4 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -46,6 +46,12 @@ public class Main {
}
}
+ public static void assertStringEquals(String expected, String result) {
+ if (expected == null ? result != null : !expected.equals(result)) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
/**
* Tiny programs exercising optimizations of arithmetic identities.
*/
@@ -1401,7 +1407,7 @@ public class Main {
// Test that conditions on float/double are not flipped.
- /// CHECK-START: int Main.floatConditionNotEqualOne(float) ssa_builder (after)
+ /// CHECK-START: int Main.floatConditionNotEqualOne(float) builder (after)
/// CHECK: LessThanOrEqual
/// CHECK-START: int Main.floatConditionNotEqualOne(float) instruction_simplifier_before_codegen (after)
@@ -1417,7 +1423,7 @@ public class Main {
return ((f > 42.0f) == true) ? 13 : 54;
}
- /// CHECK-START: int Main.doubleConditionEqualZero(double) ssa_builder (after)
+ /// CHECK-START: int Main.doubleConditionEqualZero(double) builder (after)
/// CHECK: LessThanOrEqual
/// CHECK-START: int Main.doubleConditionEqualZero(double) instruction_simplifier_before_codegen (after)
@@ -1433,6 +1439,337 @@ public class Main {
return ((d > 42.0) != false) ? 13 : 54;
}
+ /// CHECK-START: int Main.intToDoubleToInt(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Double>>]
+ /// CHECK-DAG: Return [<<Int>>]
+
+ /// CHECK-START: int Main.intToDoubleToInt(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+
+ /// CHECK-START: int Main.intToDoubleToInt(int) instruction_simplifier (after)
+ /// CHECK-NOT: TypeConversion
+
+ public static int intToDoubleToInt(int value) {
+ // Lossless conversion followed by a conversion back.
+ return (int) (double) value;
+ }
+
+ /// CHECK-START: java.lang.String Main.intToDoubleToIntPrint(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: {{i\d+}} TypeConversion [<<Double>>]
+
+ /// CHECK-START: java.lang.String Main.intToDoubleToIntPrint(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: {{d\d+}} TypeConversion [<<Arg>>]
+
+ /// CHECK-START: java.lang.String Main.intToDoubleToIntPrint(int) instruction_simplifier (after)
+ /// CHECK-DAG: TypeConversion
+ /// CHECK-NOT: TypeConversion
+
+ public static String intToDoubleToIntPrint(int value) {
+ // Lossless conversion followed by a conversion back
+ // with another use of the intermediate result.
+ double d = (double) value;
+ int i = (int) d;
+ return "d=" + d + ", i=" + i;
+ }
+
+ /// CHECK-START: int Main.byteToDoubleToInt(byte) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Double>>]
+ /// CHECK-DAG: Return [<<Int>>]
+
+ /// CHECK-START: int Main.byteToDoubleToInt(byte) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+
+ /// CHECK-START: int Main.byteToDoubleToInt(byte) instruction_simplifier (after)
+ /// CHECK-NOT: TypeConversion
+
+ public static int byteToDoubleToInt(byte value) {
+ // Lossless conversion followed by another conversion, use implicit conversion.
+ return (int) (double) value;
+ }
+
+ /// CHECK-START: int Main.floatToDoubleToInt(float) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Double>>]
+ /// CHECK-DAG: Return [<<Int>>]
+
+ /// CHECK-START: int Main.floatToDoubleToInt(float) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: Return [<<Int>>]
+
+ /// CHECK-START: int Main.floatToDoubleToInt(float) instruction_simplifier (after)
+ /// CHECK-DAG: TypeConversion
+ /// CHECK-NOT: TypeConversion
+
+ public static int floatToDoubleToInt(float value) {
+ // Lossless conversion followed by another conversion.
+ return (int) (double) value;
+ }
+
+ /// CHECK-START: java.lang.String Main.floatToDoubleToIntPrint(float) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: {{i\d+}} TypeConversion [<<Double>>]
+
+ /// CHECK-START: java.lang.String Main.floatToDoubleToIntPrint(float) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: {{i\d+}} TypeConversion [<<Double>>]
+
+ public static String floatToDoubleToIntPrint(float value) {
+ // Lossless conversion followed by another conversion with
+ // an extra use of the intermediate result.
+ double d = (double) value;
+ int i = (int) d;
+ return "d=" + d + ", i=" + i;
+ }
+
+ /// CHECK-START: short Main.byteToDoubleToShort(byte) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Double>>]
+ /// CHECK-DAG: <<Short:s\d+>> TypeConversion [<<Int>>]
+ /// CHECK-DAG: Return [<<Short>>]
+
+ /// CHECK-START: short Main.byteToDoubleToShort(byte) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:b\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+
+ /// CHECK-START: short Main.byteToDoubleToShort(byte) instruction_simplifier (after)
+ /// CHECK-NOT: TypeConversion
+
+ public static short byteToDoubleToShort(byte value) {
+ // Originally, this is byte->double->int->short. The first conversion is lossless,
+ // so we merge this with the second one to byte->int which we omit as it's an implicit
+ // conversion. Then we eliminate the resulting byte->short as an implicit conversion.
+ return (short) (double) value;
+ }
+
+ /// CHECK-START: short Main.charToDoubleToShort(char) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:c\d+>> ParameterValue
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Double>>]
+ /// CHECK-DAG: <<Short:s\d+>> TypeConversion [<<Int>>]
+ /// CHECK-DAG: Return [<<Short>>]
+
+ /// CHECK-START: short Main.charToDoubleToShort(char) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:c\d+>> ParameterValue
+ /// CHECK-DAG: <<Short:s\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: Return [<<Short>>]
+
+ /// CHECK-START: short Main.charToDoubleToShort(char) instruction_simplifier (after)
+ /// CHECK-DAG: TypeConversion
+ /// CHECK-NOT: TypeConversion
+
+ public static short charToDoubleToShort(char value) {
+ // Originally, this is char->double->int->short. The first conversion is lossless,
+ // so we merge this with the second one to char->int which we omit as it's an implicit
+ // conversion. Then we are left with the resulting char->short conversion.
+ return (short) (double) value;
+ }
+
+ /// CHECK-START: short Main.floatToIntToShort(float) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Short:s\d+>> TypeConversion [<<Int>>]
+ /// CHECK-DAG: Return [<<Short>>]
+
+ /// CHECK-START: short Main.floatToIntToShort(float) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Short:s\d+>> TypeConversion [<<Int>>]
+ /// CHECK-DAG: Return [<<Short>>]
+
+ public static short floatToIntToShort(float value) {
+ // Lossy FP to integral conversion followed by another conversion: no simplification.
+ return (short) value;
+ }
+
+ /// CHECK-START: int Main.intToFloatToInt(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Float:f\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Float>>]
+ /// CHECK-DAG: Return [<<Int>>]
+
+ /// CHECK-START: int Main.intToFloatToInt(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Float:f\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Float>>]
+ /// CHECK-DAG: Return [<<Int>>]
+
+ public static int intToFloatToInt(int value) {
+ // Lossy integral to FP conversion followed by another conversion: no simplification.
+ return (int) (float) value;
+ }
+
+ /// CHECK-START: double Main.longToIntToDouble(long) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Int>>]
+ /// CHECK-DAG: Return [<<Double>>]
+
+ /// CHECK-START: double Main.longToIntToDouble(long) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Double:d\d+>> TypeConversion [<<Int>>]
+ /// CHECK-DAG: Return [<<Double>>]
+
+ public static double longToIntToDouble(long value) {
+ // Lossy long-to-int conversion followed by an integral to FP conversion: no simplification.
+ return (double) (int) value;
+ }
+
+ /// CHECK-START: long Main.longToIntToLong(long) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Long:j\d+>> TypeConversion [<<Int>>]
+ /// CHECK-DAG: Return [<<Long>>]
+
+ /// CHECK-START: long Main.longToIntToLong(long) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Long:j\d+>> TypeConversion [<<Int>>]
+ /// CHECK-DAG: Return [<<Long>>]
+
+ public static long longToIntToLong(long value) {
+ // Lossy long-to-int conversion followed by an int-to-long conversion: no simplification.
+ return (long) (int) value;
+ }
+
+ /// CHECK-START: short Main.shortToCharToShort(short) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Char:c\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Short:s\d+>> TypeConversion [<<Char>>]
+ /// CHECK-DAG: Return [<<Short>>]
+
+ /// CHECK-START: short Main.shortToCharToShort(short) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+
+ public static short shortToCharToShort(short value) {
+ // Integral conversion followed by a non-widening integral conversion back to the original type.
+ return (short) (char) value;
+ }
+
+ /// CHECK-START: int Main.shortToLongToInt(short) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Long:j\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<Long>>]
+ /// CHECK-DAG: Return [<<Int>>]
+
+ /// CHECK-START: int Main.shortToLongToInt(short) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Arg>>]
+
+ public static int shortToLongToInt(short value) {
+ // Integral conversion followed by non-widening integral conversion, use implicit conversion.
+ return (int) (long) value;
+ }
+
+ /// CHECK-START: byte Main.shortToCharToByte(short) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Char:c\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: <<Byte:b\d+>> TypeConversion [<<Char>>]
+ /// CHECK-DAG: Return [<<Byte>>]
+
+ /// CHECK-START: byte Main.shortToCharToByte(short) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Byte:b\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: Return [<<Byte>>]
+
+ public static byte shortToCharToByte(short value) {
+ // Integral conversion followed by non-widening integral conversion losing bits
+ // from the original type. Simplify to use only one conversion.
+ return (byte) (char) value;
+ }
+
+ /// CHECK-START: java.lang.String Main.shortToCharToBytePrint(short) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Char:c\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: {{b\d+}} TypeConversion [<<Char>>]
+
+ /// CHECK-START: java.lang.String Main.shortToCharToBytePrint(short) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:s\d+>> ParameterValue
+ /// CHECK-DAG: <<Char:c\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: {{b\d+}} TypeConversion [<<Char>>]
+
+ public static String shortToCharToBytePrint(short value) {
+ // Integral conversion followed by non-widening integral conversion losing bits
+ // from the original type with an extra use of the intermediate result.
+ char c = (char) value;
+ byte b = (byte) c;
+ return "c=" + ((int) c) + ", b=" + ((int) b); // implicit conversions.
+ }
+
+ /// CHECK-START: byte Main.longAnd0xffToByte(long) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Mask:j\d+>> LongConstant 255
+ /// CHECK-DAG: <<And:j\d+>> And [<<Mask>>,<<Arg>>]
+ /// CHECK-DAG: <<Int:i\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: <<Byte:b\d+>> TypeConversion [<<Int>>]
+ /// CHECK-DAG: Return [<<Byte>>]
+
+ /// CHECK-START: byte Main.longAnd0xffToByte(long) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Byte:b\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: Return [<<Byte>>]
+
+ /// CHECK-START: byte Main.longAnd0xffToByte(long) instruction_simplifier (after)
+ /// CHECK-NOT: And
+
+ public static byte longAnd0xffToByte(long value) {
+ return (byte) (value & 0xff);
+ }
+
+ /// CHECK-START: char Main.intAnd0x1ffffToChar(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Mask:i\d+>> IntConstant 131071
+ /// CHECK-DAG: <<And:i\d+>> And [<<Mask>>,<<Arg>>]
+ /// CHECK-DAG: <<Char:c\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: Return [<<Char>>]
+
+ /// CHECK-START: char Main.intAnd0x1ffffToChar(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Char:c\d+>> TypeConversion [<<Arg>>]
+ /// CHECK-DAG: Return [<<Char>>]
+
+ /// CHECK-START: char Main.intAnd0x1ffffToChar(int) instruction_simplifier (after)
+ /// CHECK-NOT: And
+
+ public static char intAnd0x1ffffToChar(int value) {
+ // Keeping all significant bits and one more.
+ return (char) (value & 0x1ffff);
+ }
+
+ /// CHECK-START: short Main.intAnd0x17fffToShort(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Mask:i\d+>> IntConstant 98303
+ /// CHECK-DAG: <<And:i\d+>> And [<<Mask>>,<<Arg>>]
+ /// CHECK-DAG: <<Short:s\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: Return [<<Short>>]
+
+ /// CHECK-START: short Main.intAnd0x17fffToShort(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Mask:i\d+>> IntConstant 98303
+ /// CHECK-DAG: <<And:i\d+>> And [<<Mask>>,<<Arg>>]
+ /// CHECK-DAG: <<Short:s\d+>> TypeConversion [<<And>>]
+ /// CHECK-DAG: Return [<<Short>>]
+
+ public static short intAnd0x17fffToShort(int value) {
+ // No simplification: clearing a significant bit.
+ return (short) (value & 0x17fff);
+ }
+
public static void main(String[] args) {
int arg = 123456;
@@ -1518,6 +1855,57 @@ public class Main {
assertIntEquals(floatConditionNotEqualOne(43.0f), 13);
assertIntEquals(doubleConditionEqualZero(6.0), 54);
assertIntEquals(doubleConditionEqualZero(43.0), 13);
+
+ assertIntEquals(intToDoubleToInt(1234567), 1234567);
+ assertIntEquals(intToDoubleToInt(Integer.MIN_VALUE), Integer.MIN_VALUE);
+ assertIntEquals(intToDoubleToInt(Integer.MAX_VALUE), Integer.MAX_VALUE);
+ assertStringEquals(intToDoubleToIntPrint(7654321), "d=7654321.0, i=7654321");
+ assertIntEquals(byteToDoubleToInt((byte) 12), 12);
+ assertIntEquals(byteToDoubleToInt(Byte.MIN_VALUE), Byte.MIN_VALUE);
+ assertIntEquals(byteToDoubleToInt(Byte.MAX_VALUE), Byte.MAX_VALUE);
+ assertIntEquals(floatToDoubleToInt(11.3f), 11);
+ assertStringEquals(floatToDoubleToIntPrint(12.25f), "d=12.25, i=12");
+ assertIntEquals(byteToDoubleToShort((byte) 123), 123);
+ assertIntEquals(byteToDoubleToShort(Byte.MIN_VALUE), Byte.MIN_VALUE);
+ assertIntEquals(byteToDoubleToShort(Byte.MAX_VALUE), Byte.MAX_VALUE);
+ assertIntEquals(charToDoubleToShort((char) 1234), 1234);
+ assertIntEquals(charToDoubleToShort(Character.MIN_VALUE), Character.MIN_VALUE);
+ assertIntEquals(charToDoubleToShort(Character.MAX_VALUE), /* sign-extended */ -1);
+ assertIntEquals(floatToIntToShort(12345.75f), 12345);
+ assertIntEquals(floatToIntToShort((float)(Short.MIN_VALUE - 1)), Short.MAX_VALUE);
+ assertIntEquals(floatToIntToShort((float)(Short.MAX_VALUE + 1)), Short.MIN_VALUE);
+ assertIntEquals(intToFloatToInt(-54321), -54321);
+ assertDoubleEquals(longToIntToDouble(0x1234567812345678L), (double) 0x12345678);
+ assertDoubleEquals(longToIntToDouble(Long.MIN_VALUE), 0.0);
+ assertDoubleEquals(longToIntToDouble(Long.MAX_VALUE), -1.0);
+ assertLongEquals(longToIntToLong(0x1234567812345678L), 0x0000000012345678L);
+ assertLongEquals(longToIntToLong(0x1234567887654321L), 0xffffffff87654321L);
+ assertLongEquals(longToIntToLong(Long.MIN_VALUE), 0L);
+ assertLongEquals(longToIntToLong(Long.MAX_VALUE), -1L);
+ assertIntEquals(shortToCharToShort((short) -5678), (short) -5678);
+ assertIntEquals(shortToCharToShort(Short.MIN_VALUE), Short.MIN_VALUE);
+ assertIntEquals(shortToCharToShort(Short.MAX_VALUE), Short.MAX_VALUE);
+ assertIntEquals(shortToLongToInt((short) 5678), 5678);
+ assertIntEquals(shortToLongToInt(Short.MIN_VALUE), Short.MIN_VALUE);
+ assertIntEquals(shortToLongToInt(Short.MAX_VALUE), Short.MAX_VALUE);
+ assertIntEquals(shortToCharToByte((short) 0x1234), 0x34);
+ assertIntEquals(shortToCharToByte((short) 0x12f0), -0x10);
+ assertIntEquals(shortToCharToByte(Short.MIN_VALUE), 0);
+ assertIntEquals(shortToCharToByte(Short.MAX_VALUE), -1);
+ assertStringEquals(shortToCharToBytePrint((short) 1025), "c=1025, b=1");
+ assertStringEquals(shortToCharToBytePrint((short) 1023), "c=1023, b=-1");
+ assertStringEquals(shortToCharToBytePrint((short) -1), "c=65535, b=-1");
+
+ assertIntEquals(longAnd0xffToByte(0x1234432112344321L), 0x21);
+ assertIntEquals(longAnd0xffToByte(Long.MIN_VALUE), 0);
+ assertIntEquals(longAnd0xffToByte(Long.MAX_VALUE), -1);
+ assertIntEquals(intAnd0x1ffffToChar(0x43211234), 0x1234);
+ assertIntEquals(intAnd0x1ffffToChar(Integer.MIN_VALUE), 0);
+ assertIntEquals(intAnd0x1ffffToChar(Integer.MAX_VALUE), Character.MAX_VALUE);
+ assertIntEquals(intAnd0x17fffToShort(0x87654321), 0x4321);
+ assertIntEquals(intAnd0x17fffToShort(0x88888888), 0x0888);
+ assertIntEquals(intAnd0x17fffToShort(Integer.MIN_VALUE), 0);
+ assertIntEquals(intAnd0x17fffToShort(Integer.MAX_VALUE), Short.MAX_VALUE);
}
public static boolean booleanField;
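The conversion simplifications exercised above rely on simple value identities: merging a lossless first conversion into a single conversion from the original type, and dropping an And whose mask keeps every bit that a following narrowing conversion keeps. A small stand-alone sanity check of those identities (illustrative only, not part of the test suite; run with java -ea):

public class ConversionIdentities {
  public static void main(String[] args) {
    // short -> char -> short keeps all 16 bits, so the round trip is the identity.
    short s = -5678;
    assert (short) (char) s == s;

    // float -> double is lossless, so the intermediate double step can be skipped:
    // (int) (double) f gives the same result as a direct (int) f.
    float f = 12345.75f;
    assert (int) (double) f == (int) f;

    // Masking with 0xff keeps every bit that the final byte cast keeps,
    // so (byte) (l & 0xff) equals a plain long -> byte conversion.
    long l = 0x1234432112344321L;
    assert (byte) (l & 0xff) == (byte) l;

    System.out.println("identities hold");
  }
}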
diff --git a/test/464-checker-inline-sharpen-calls/src/Main.java b/test/464-checker-inline-sharpen-calls/src/Main.java
index 2222e0fa0c..3f256352b7 100644
--- a/test/464-checker-inline-sharpen-calls/src/Main.java
+++ b/test/464-checker-inline-sharpen-calls/src/Main.java
@@ -39,7 +39,7 @@ public final class Main {
m.invokeVirtual();
}
- /// CHECK-START: int Main.inlineSharpenHelperInvoke() ssa_builder (after)
+ /// CHECK-START: int Main.inlineSharpenHelperInvoke() builder (after)
/// CHECK-DAG: <<Invoke:i\d+>> InvokeVirtual {{.*\.getFoo.*}}
/// CHECK-DAG: Return [<<Invoke>>]
diff --git a/test/477-checker-bound-type/src/Main.java b/test/477-checker-bound-type/src/Main.java
index 0f65e44678..2504ab2839 100644
--- a/test/477-checker-bound-type/src/Main.java
+++ b/test/477-checker-bound-type/src/Main.java
@@ -17,7 +17,7 @@
public class Main {
- /// CHECK-START: java.lang.Object Main.boundTypeForIf(java.lang.Object) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.boundTypeForIf(java.lang.Object) builder (after)
/// CHECK: BoundType
public static Object boundTypeForIf(Object a) {
if (a != null) {
@@ -27,7 +27,7 @@ public class Main {
}
}
- /// CHECK-START: java.lang.Object Main.boundTypeForInstanceOf(java.lang.Object) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.boundTypeForInstanceOf(java.lang.Object) builder (after)
/// CHECK: BoundType
public static Object boundTypeForInstanceOf(Object a) {
if (a instanceof Main) {
@@ -37,7 +37,7 @@ public class Main {
}
}
- /// CHECK-START: java.lang.Object Main.noBoundTypeForIf(java.lang.Object) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.noBoundTypeForIf(java.lang.Object) builder (after)
/// CHECK-NOT: BoundType
public static Object noBoundTypeForIf(Object a) {
if (a == null) {
@@ -47,7 +47,7 @@ public class Main {
}
}
- /// CHECK-START: java.lang.Object Main.noBoundTypeForInstanceOf(java.lang.Object) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.noBoundTypeForInstanceOf(java.lang.Object) builder (after)
/// CHECK-NOT: BoundType
public static Object noBoundTypeForInstanceOf(Object a) {
if (a instanceof Main) {
diff --git a/test/492-checker-inline-invoke-interface/src/Main.java b/test/492-checker-inline-invoke-interface/src/Main.java
index 3106ce4f3e..a919690000 100644
--- a/test/492-checker-inline-invoke-interface/src/Main.java
+++ b/test/492-checker-inline-invoke-interface/src/Main.java
@@ -31,7 +31,7 @@ public class Main implements Itf {
int a = ForceStatic.field;
}
- /// CHECK-START: void Main.main(java.lang.String[]) ssa_builder (after)
+ /// CHECK-START: void Main.main(java.lang.String[]) builder (after)
/// CHECK: InvokeStaticOrDirect {{.*Main.<init>.*}}
/// CHECK: InvokeInterface
diff --git a/test/510-checker-try-catch/smali/Builder.smali b/test/510-checker-try-catch/smali/Builder.smali
index 1fde5edc23..8ec840d159 100644
--- a/test/510-checker-try-catch/smali/Builder.smali
+++ b/test/510-checker-try-catch/smali/Builder.smali
@@ -41,28 +41,35 @@
## CHECK: predecessors "<<BEnterTry2>>"
## CHECK: successors "<<BExitTry2:B\d+>>"
## CHECK: DivZeroCheck
+## CHECK: <<Div:i\d+>> Div
-## CHECK: name "<<BReturn:B\d+>>"
-## CHECK: predecessors "<<BExitTry2>>" "<<BCatch1:B\d+>>" "<<BCatch2:B\d+>>" "<<BCatch3:B\d+>>"
+## CHECK: name "<<BAfterTry2:B\d+>>"
+## CHECK: predecessors "<<BExitTry2>>"
+## CHECK: successors "<<BReturn:B\d+>>"
+## CHECK: Goto
+
+## CHECK: name "<<BReturn>>"
+## CHECK: predecessors "<<BAfterTry2>>" "<<BCatch1:B\d+>>" "<<BCatch2:B\d+>>" "<<BCatch3:B\d+>>"
+## CHECK: Phi [<<Div>>,<<Minus1>>,<<Minus2>>,<<Minus3>>]
## CHECK: Return
## CHECK: name "<<BCatch1>>"
## CHECK: predecessors "<<BEnterTry1>>" "<<BExitTry1>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus1>>]
+## CHECK: Goto
## CHECK: name "<<BCatch2>>"
## CHECK: predecessors "<<BEnterTry2>>" "<<BExitTry2>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus2>>]
+## CHECK: Goto
## CHECK: name "<<BCatch3>>"
## CHECK: predecessors "<<BEnterTry1>>" "<<BEnterTry2>>" "<<BExitTry1>>" "<<BExitTry2>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus3>>]
+## CHECK: Goto
## CHECK: name "<<BEnterTry1>>"
## CHECK: predecessors "B0"
@@ -84,7 +91,7 @@
## CHECK: name "<<BExitTry2>>"
## CHECK: predecessors "<<BTry2>>"
-## CHECK: successors "<<BReturn>>"
+## CHECK: successors "<<BAfterTry2>>"
## CHECK: xhandlers "<<BCatch2>>" "<<BCatch3>>"
## CHECK: TryBoundary kind:exit
@@ -105,6 +112,8 @@
.catch Ljava/lang/OutOfMemoryError; {:try_start_2 .. :try_end_2} :catch_mem
.catchall {:try_start_2 .. :try_end_2} :catch_other
+ nop
+
:return
return p0
@@ -131,7 +140,7 @@
## CHECK: name "<<BIf>>"
## CHECK: predecessors "B0"
-## CHECK: successors "<<BEnterTry2:B\d+>>" "<<BThen:B\d+>>"
+## CHECK: successors "<<BSplit1:B\d+>>" "<<BThen:B\d+>>"
## CHECK: If
## CHECK: name "<<BThen>>"
@@ -145,19 +154,19 @@
## CHECK: Div
## CHECK: name "<<BTry2:B\d+>>"
-## CHECK: predecessors "<<BEnterTry2>>"
+## CHECK: predecessors "<<BEnterTry2:B\d+>>"
## CHECK: successors "<<BExitTry2:B\d+>>"
## CHECK: Div
## CHECK: name "<<BReturn:B\d+>>"
-## CHECK: predecessors "<<BExitTry2>>" "<<BCatch:B\d+>>"
+## CHECK: predecessors "<<BSplit3:B\d+>>" "<<BCatch:B\d+>>"
## CHECK: Return
## CHECK: name "<<BCatch>>"
## CHECK: predecessors "<<BEnterTry1>>" "<<BEnterTry2>>" "<<BExitTry1>>" "<<BExitTry2>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus1>>]
+## CHECK: Goto
## CHECK: name "<<BEnterTry1>>"
## CHECK: predecessors "<<BThen>>"
@@ -166,23 +175,38 @@
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BEnterTry2>>"
-## CHECK: predecessors "<<BIf>>" "<<BExitTry1>>"
+## CHECK: predecessors "<<BSplit1>>" "<<BSplit2:B\d+>>"
## CHECK: successors "<<BTry2>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BExitTry1>>"
## CHECK: predecessors "<<BTry1>>"
-## CHECK: successors "<<BEnterTry2>>"
+## CHECK: successors "<<BSplit2>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
## CHECK: name "<<BExitTry2>>"
## CHECK: predecessors "<<BTry2>>"
-## CHECK: successors "<<BReturn>>"
+## CHECK: successors "<<BSplit3>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit1>>"
+## CHECK: predecessors "<<BIf>>"
+## CHECK: successors "<<BEnterTry2>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit2>>"
+## CHECK: predecessors "<<BExitTry1>>"
+## CHECK: successors "<<BEnterTry2>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit3>>"
+## CHECK: predecessors "<<BExitTry2>>"
+## CHECK: successors "<<BReturn>>"
+## CHECK: Goto
+
.method public static testMultipleEntries(IIII)I
.registers 4
@@ -220,23 +244,24 @@
## CHECK: name "<<BTry:B\d+>>"
## CHECK: predecessors "<<BEnterTry>>"
## CHECK: successors "<<BExitTry1:B\d+>>" "<<BExitTry2:B\d+>>"
-## CHECK: Div
+## CHECK: <<Div:i\d+>> Div
## CHECK: If
## CHECK: name "<<BReturn:B\d+>>"
-## CHECK: predecessors "<<BExitTry2>>" "<<BThen:B\d+>>" "<<BCatch:B\d+>>"
+## CHECK: predecessors "<<BSplit:B\d+>>" "<<BThen:B\d+>>" "<<BCatch:B\d+>>"
+## CHECK: Phi [<<Div>>,<<Minus1>>,<<Minus2>>]
## CHECK: Return
## CHECK: name "<<BThen>>"
## CHECK: predecessors "<<BExitTry1>>"
## CHECK: successors "<<BReturn>>"
-## CHECK: StoreLocal [v0,<<Minus1>>]
+## CHECK: Goto
## CHECK: name "<<BCatch>>"
## CHECK: predecessors "<<BEnterTry>>" "<<BExitTry1>>" "<<BExitTry2>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus2>>]
+## CHECK: Goto
## CHECK: name "<<BEnterTry>>"
## CHECK: predecessors "B0"
@@ -252,10 +277,15 @@
## CHECK: name "<<BExitTry2>>"
## CHECK: predecessors "<<BTry>>"
-## CHECK: successors "<<BReturn>>"
+## CHECK: successors "<<BSplit>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit>>"
+## CHECK: predecessors "<<BExitTry2>>"
+## CHECK: successors "<<BReturn>>"
+## CHECK: Goto
+
.method public static testMultipleExits(II)I
.registers 2
@@ -295,23 +325,25 @@
## CHECK: name "<<BTry2:B\d+>>"
## CHECK: predecessors "<<BEnter2:B\d+>>"
## CHECK: successors "<<BExit2:B\d+>>"
-## CHECK: Div
+## CHECK: <<Div:i\d+>> Div
+## CHECK: Goto
## CHECK: name "<<BReturn:B\d+>>"
-## CHECK: predecessors "<<BExit2>>" "<<BCatch1:B\d+>>" "<<BCatch2:B\d+>>"
+## CHECK: predecessors "<<BSplit:B\d+>>" "<<BCatch1:B\d+>>" "<<BCatch2:B\d+>>"
+## CHECK: Phi [<<Div>>,<<Minus1>>,<<Minus2>>]
## CHECK: Return
## CHECK: name "<<BCatch1>>"
## CHECK: predecessors "<<BEnter1>>" "<<BExit1>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus1>>]
+## CHECK: Goto
## CHECK: name "<<BCatch2>>"
## CHECK: predecessors "<<BEnter2>>" "<<BExit2>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus2>>]
+## CHECK: Goto
## CHECK: name "<<BEnter1>>"
## CHECK: predecessors "B0"
@@ -333,10 +365,15 @@
## CHECK: name "<<BExit2>>"
## CHECK: predecessors "<<BTry2>>"
-## CHECK: successors "<<BReturn>>"
+## CHECK: successors "<<BSplit>>"
## CHECK: xhandlers "<<BCatch2>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit>>"
+## CHECK: predecessors "<<BExit2>>"
+## CHECK: successors "<<BReturn>>"
+## CHECK: Goto
+
.method public static testSharedBoundary(III)I
.registers 3
@@ -378,28 +415,31 @@
## CHECK: name "<<BTry1:B\d+>>"
## CHECK: predecessors "<<BEnter1:B\d+>>"
## CHECK: successors "<<BExit1:B\d+>>"
-## CHECK: Div
+## CHECK: <<Div:i\d+>> Div
+## CHECK: Goto
## CHECK: name "<<BTry2:B\d+>>"
## CHECK: predecessors "<<BEnter2>>"
## CHECK: successors "<<BExit2:B\d+>>"
## CHECK: Div
+## CHECK: Goto
## CHECK: name "<<BReturn:B\d+>>"
-## CHECK: predecessors "<<BExit1>>" "<<BCatch1:B\d+>>" "<<BCatch2:B\d+>>"
+## CHECK: predecessors "<<BSplit:B\d+>>" "<<BCatch1:B\d+>>" "<<BCatch2:B\d+>>"
+## CHECK: Phi [<<Div>>,<<Minus1>>,<<Minus2>>]
## CHECK: Return
## CHECK: name "<<BCatch1>>"
## CHECK: predecessors "<<BEnter1>>" "<<BExit1>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus1>>]
+## CHECK: Goto
## CHECK: name "<<BCatch2>>"
## CHECK: predecessors "<<BEnter2>>" "<<BExit2>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus2>>]
+## CHECK: Goto
## CHECK: name "<<BEnter1>>"
## CHECK: predecessors "<<BExit2>>"
@@ -415,7 +455,7 @@
## CHECK: name "<<BExit1>>"
## CHECK: predecessors "<<BTry1>>"
-## CHECK: successors "<<BReturn>>"
+## CHECK: successors "<<BSplit>>"
## CHECK: xhandlers "<<BCatch1>>"
## CHECK: TryBoundary kind:exit
@@ -425,6 +465,11 @@
## CHECK: xhandlers "<<BCatch2>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit>>"
+## CHECK: predecessors "<<BExit1>>"
+## CHECK: successors "<<BReturn>>"
+## CHECK: Goto
+
.method public static testSharedBoundary_Reverse(III)I
.registers 3
@@ -472,26 +517,30 @@
## CHECK: predecessors "<<BEnter2:B\d+>>"
## CHECK: successors "<<BExit2:B\d+>>"
## CHECK: Div
+## CHECK: Goto
## CHECK: name "<<BTry3:B\d+>>"
## CHECK: predecessors "<<BEnter3:B\d+>>"
## CHECK: successors "<<BExit3:B\d+>>"
-## CHECK: Div
+## CHECK: <<Div:i\d+>> Div
+## CHECK: Goto
## CHECK: name "<<BReturn:B\d+>>"
-## CHECK: predecessors "<<BExit3>>" "<<BCatchArith:B\d+>>" "<<BCatchAll:B\d+>>"
+## CHECK: predecessors "<<BSplit:B\d+>>" "<<BCatchArith:B\d+>>" "<<BCatchAll:B\d+>>"
+## CHECK: Phi [<<Div>>,<<Minus1>>,<<Minus2>>]
+## CHECK: Return
## CHECK: name "<<BCatchArith>>"
## CHECK: predecessors "<<BEnter2>>" "<<BExit2>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus1>>]
+## CHECK: Goto
## CHECK: name "<<BCatchAll>>"
## CHECK: predecessors "<<BEnter1>>" "<<BEnter2>>" "<<BEnter3>>" "<<BExit1>>" "<<BExit2>>" "<<BExit3>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus2>>]
+## CHECK: Goto
## CHECK: name "<<BEnter1>>"
## CHECK: predecessors "B0"
@@ -525,10 +574,15 @@
## CHECK: name "<<BExit3>>"
## CHECK: predecessors "<<BTry3>>"
-## CHECK: successors "<<BReturn>>"
+## CHECK: successors "<<BSplit>>"
## CHECK: xhandlers "<<BCatchAll>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit>>"
+## CHECK: predecessors "<<BExit3>>"
+## CHECK: successors "<<BReturn>>"
+## CHECK: Goto
+
.method public static testNestedTry(IIII)I
.registers 4
@@ -567,14 +621,18 @@
## CHECK: predecessors "<<BEnterTry1:B\d+>>"
## CHECK: successors "<<BExitTry1:B\d+>>"
## CHECK: Div
+## CHECK: Goto
## CHECK: name "<<BTry2:B\d+>>"
## CHECK: predecessors "<<BEnterTry2:B\d+>>"
## CHECK: successors "<<BExitTry2:B\d+>>"
-## CHECK: Div
+## CHECK: <<Div:i\d+>> Div
+## CHECK: Goto
## CHECK: name "<<BReturn:B\d+>>"
-## CHECK: predecessors "<<BExitTry2>>" "<<BCatch:B\d+>>"
+## CHECK: predecessors "<<BSplit:B\d+>>" "<<BCatch:B\d+>>"
+## CHECK: Phi [<<Div>>,<<Minus1>>]
+## CHECK: Return
## CHECK: name "<<BOutside:B\d+>>"
## CHECK: predecessors "<<BExitTry1>>"
@@ -585,7 +643,7 @@
## CHECK: predecessors "<<BEnterTry1>>" "<<BEnterTry2>>" "<<BExitTry1>>" "<<BExitTry2>>"
## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus1>>]
+## CHECK: Goto
## CHECK: name "<<BEnterTry1>>"
## CHECK: predecessors "B0"
@@ -607,10 +665,15 @@
## CHECK: name "<<BExitTry2>>"
## CHECK: predecessors "<<BTry2>>"
-## CHECK: successors "<<BReturn>>"
+## CHECK: successors "<<BSplit>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit>>"
+## CHECK: predecessors "<<BExitTry2>>"
+## CHECK: successors "<<BReturn>>"
+## CHECK: Goto
+
.method public static testIncontinuousTry(IIII)I
.registers 4
@@ -642,12 +705,12 @@
## CHECK: name "<<BPSwitch0>>"
## CHECK: predecessors "B0"
-## CHECK: successors "<<BEnterTry2:B\d+>>" "<<BPSwitch1:B\d+>>"
+## CHECK: successors "<<BSplit1:B\d+>>" "<<BPSwitch1:B\d+>>"
## CHECK: If
## CHECK: name "<<BPSwitch1>>"
## CHECK: predecessors "<<BPSwitch0>>"
-## CHECK: successors "<<BOutside:B\d+>>" "<<BEnterTry1:B\d+>>"
+## CHECK: successors "<<BSplit2:B\d+>>" "<<BEnterTry1:B\d+>>"
## CHECK: If
## CHECK: name "<<BTry1:B\d+>>"
@@ -656,44 +719,68 @@
## CHECK: Div
## CHECK: name "<<BTry2:B\d+>>"
-## CHECK: predecessors "<<BEnterTry2>>"
+## CHECK: predecessors "<<BEnterTry2:B\d+>>"
## CHECK: successors "<<BExitTry2:B\d+>>"
## CHECK: Div
-## CHECK: name "<<BOutside>>"
-## CHECK: predecessors "<<BPSwitch1>>" "<<BExitTry2>>"
-## CHECK: successors "<<BCatchReturn:B\d+>>"
+## CHECK: name "<<BOutside:B\d+>>"
+## CHECK: predecessors "<<BSplit2>>" "<<BSplit4:B\d+>>"
+## CHECK: successors "<<BReturn:B\d+>>"
## CHECK: Div
-## CHECK: name "<<BCatchReturn>>"
-## CHECK: predecessors "<<BOutside>>" "<<BEnterTry1>>" "<<BEnterTry2>>" "<<BExitTry1>>" "<<BExitTry2>>"
+## CHECK: name "<<BCatch:B\d+>>"
+## CHECK: predecessors "<<BEnterTry1>>" "<<BEnterTry2>>" "<<BExitTry1>>" "<<BExitTry2>>"
## CHECK: flags "catch_block"
-## CHECK: Return
+## CHECK: Goto
## CHECK: name "<<BEnterTry1>>"
## CHECK: predecessors "<<BPSwitch1>>"
## CHECK: successors "<<BTry1>>"
-## CHECK: xhandlers "<<BCatchReturn>>"
+## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BEnterTry2>>"
-## CHECK: predecessors "<<BPSwitch0>>"
+## CHECK: predecessors "<<BSplit1>>" "<<BSplit3:B\d+>>"
## CHECK: successors "<<BTry2>>"
-## CHECK: xhandlers "<<BCatchReturn>>"
+## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BExitTry1>>"
## CHECK: predecessors "<<BTry1>>"
-## CHECK: successors "<<BEnterTry2>>"
-## CHECK: xhandlers "<<BCatchReturn>>"
+## CHECK: successors "<<BSplit3>>"
+## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
## CHECK: name "<<BExitTry2>>"
## CHECK: predecessors "<<BTry2>>"
-## CHECK: successors "<<BOutside>>"
-## CHECK: xhandlers "<<BCatchReturn>>"
+## CHECK: successors "<<BSplit4>>"
+## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BReturn>>"
+## CHECK: predecessors "<<BCatch>>" "<<BOutside>>"
+## CHECK: Return
+
+## CHECK: name "<<BSplit1>>"
+## CHECK: predecessors "<<BPSwitch0>>"
+## CHECK: successors "<<BEnterTry2>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit2>>"
+## CHECK: predecessors "<<BPSwitch1>>"
+## CHECK: successors "<<BOutside>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit3>>"
+## CHECK: predecessors "<<BExitTry1>>"
+## CHECK: successors "<<BEnterTry2>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit4>>"
+## CHECK: predecessors "<<BExitTry2>>"
+## CHECK: successors "<<BOutside>>"
+## CHECK: Goto
+
.method public static testSwitchTryEnter(IIII)I
.registers 4
@@ -728,58 +815,78 @@
## CHECK: name "<<BPSwitch0:B\d+>>"
## CHECK: predecessors "<<BEnterTry1>>"
-## CHECK: successors "<<BTry2:B\d+>>" "<<BExitTry1:B\d+>>"
+## CHECK: successors "<<BSplit1:B\d+>>" "<<BExitTry1:B\d+>>"
## CHECK: If
## CHECK: name "<<BPSwitch1:B\d+>>"
## CHECK: predecessors "<<BExitTry1>>"
-## CHECK: successors "<<BOutside:B\d+>>" "<<BEnterTry2:B\d+>>"
+## CHECK: successors "<<BSplit2:B\d+>>" "<<BEnterTry2:B\d+>>"
## CHECK: If
## CHECK: name "<<BTry1:B\d+>>"
## CHECK: predecessors "<<BEnterTry2>>"
-## CHECK: successors "<<BTry2>>"
+## CHECK: successors "<<BTry2:B\d+>>"
## CHECK: Div
## CHECK: name "<<BTry2>>"
-## CHECK: predecessors "<<BPSwitch0>>"
+## CHECK: predecessors "<<BSplit1>>" "<<BTry1>>"
## CHECK: successors "<<BExitTry2:B\d+>>"
## CHECK: Div
-## CHECK: name "<<BOutside>>"
-## CHECK: predecessors "<<BPSwitch1>>" "<<BExitTry2>>"
-## CHECK: successors "<<BCatchReturn:B\d+>>"
+## CHECK: name "<<BOutside:B\d+>>"
+## CHECK: predecessors "<<BSplit2>>" "<<BSplit3:B\d+>>"
+## CHECK: successors "<<BReturn:B\d+>>"
## CHECK: Div
-## CHECK: name "<<BCatchReturn>>"
-## CHECK: predecessors "<<BOutside>>" "<<BEnterTry1>>" "<<BEnterTry2>>" "<<BExitTry1>>" "<<BExitTry2>>"
+## CHECK: name "<<BCatch:B\d+>>"
+## CHECK: predecessors "<<BEnterTry1>>" "<<BEnterTry2>>" "<<BExitTry1>>" "<<BExitTry2>>"
+## CHECK: successors "<<BReturn>>"
## CHECK: flags "catch_block"
-## CHECK: Return
+## CHECK: Goto
## CHECK: name "<<BEnterTry1>>"
## CHECK: predecessors "B0"
## CHECK: successors "<<BPSwitch0>>"
-## CHECK: xhandlers "<<BCatchReturn>>"
+## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BEnterTry2>>"
## CHECK: predecessors "<<BPSwitch1>>"
## CHECK: successors "<<BTry1>>"
-## CHECK: xhandlers "<<BCatchReturn>>"
+## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BExitTry1>>"
## CHECK: predecessors "<<BPSwitch0>>"
## CHECK: successors "<<BPSwitch1>>"
-## CHECK: xhandlers "<<BCatchReturn>>"
+## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
## CHECK: name "<<BExitTry2>>"
## CHECK: predecessors "<<BTry2>>"
-## CHECK: successors "<<BOutside>>"
-## CHECK: xhandlers "<<BCatchReturn>>"
+## CHECK: successors "<<BSplit3>>"
+## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BReturn>>"
+## CHECK: predecessors "<<BCatch>>" "<<BOutside>>"
+## CHECK: Return
+
+## CHECK: name "<<BSplit1>>"
+## CHECK: predecessors "<<BPSwitch0>>"
+## CHECK: successors "<<BTry2>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit2>>"
+## CHECK: predecessors "<<BPSwitch1>>"
+## CHECK: successors "<<BOutside>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit3>>"
+## CHECK: predecessors "<<BExitTry2>>"
+## CHECK: successors "<<BOutside>>"
+## CHECK: Goto
+
.method public static testSwitchTryExit(IIII)I
.registers 4
@@ -825,7 +932,7 @@
## CHECK: predecessors "<<BEnterTry>>" "<<BExitTry>>"
## CHECK: successors "<<BExit:B\d+>>"
## CHECK: flags "catch_block"
-## CHECK: StoreLocal [v0,<<Minus1>>]
+## CHECK: Return [<<Minus1>>]
## CHECK: name "<<BExit>>"
## CHECK: predecessors "<<BExitTry>>" "<<BCatch>>"
@@ -861,18 +968,21 @@
## CHECK-START: int Builder.testCatchLoop(int, int, int) builder (after)
## CHECK: name "B0"
-## CHECK: successors "<<BCatch:B\d+>>"
+## CHECK: successors "<<BSplit2:B\d+>>"
-## CHECK: name "<<BCatch>>"
-## CHECK: predecessors "B0" "<<BEnterTry:B\d+>>" "<<BExitTry:B\d+>>"
-## CHECK: successors "<<BEnterTry>>"
+## CHECK: name "<<BCatch:B\d+>>"
+## CHECK: predecessors "<<BEnterTry:B\d+>>" "<<BExitTry:B\d+>>"
+## CHECK: successors "<<BSplit1:B\d+>>"
## CHECK: flags "catch_block"
## CHECK: name "<<BReturn:B\d+>>"
## CHECK: predecessors "<<BExitTry>>"
## CHECK: successors "<<BExit:B\d+>>"
+## CHECK: Return
## CHECK: name "<<BExit>>"
+## CHECK: predecessors "<<BReturn>>"
+## CHECK: Exit
## CHECK: name "<<BTry:B\d+>>"
## CHECK: predecessors "<<BEnterTry>>"
@@ -880,7 +990,7 @@
## CHECK: Div
## CHECK: name "<<BEnterTry>>"
-## CHECK: predecessors "<<BCatch>>"
+## CHECK: predecessors "<<BSplit1>>"
## CHECK: successors "<<BTry>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:entry
@@ -891,6 +1001,16 @@
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit1>>"
+## CHECK: predecessors "<<BSplit2>>" "<<BCatch>>"
+## CHECK: successors "<<BEnterTry>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit2>>"
+## CHECK: predecessors "B0"
+## CHECK: successors "<<BSplit1>>"
+## CHECK: Goto
+
.method public static testCatchLoop(III)I
.registers 4
@@ -918,14 +1038,16 @@
## CHECK: Div
## CHECK: name "<<BCatch:B\d+>>"
-## CHECK: predecessors "<<BExitTry1>>" "<<BEnterTry1>>" "<<BEnterTry2:B\d+>>" "<<BExitTry1>>" "<<BExitTry2:B\d+>>"
-## CHECK: successors "<<BEnterTry2>>"
+## CHECK: predecessors "<<BEnterTry1>>" "<<BEnterTry2:B\d+>>" "<<BExitTry1>>" "<<BExitTry2:B\d+>>"
+## CHECK: successors "<<BSplit1:B\d+>>"
## CHECK: flags "catch_block"
## CHECK: name "<<BReturn:B\d+>>"
## CHECK: predecessors "<<BExitTry2>>"
+## CHECK: successors "<<BExit:B\d+>>"
-## CHECK: name "{{B\d+}}"
+## CHECK: name "<<BExit>>"
+## CHECK: predecessors "<<BReturn>>"
## CHECK: Exit
## CHECK: name "<<BTry2:B\d+>>"
@@ -940,14 +1062,14 @@
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BEnterTry2>>"
-## CHECK: predecessors "<<BCatch>>"
+## CHECK: predecessors "<<BSplit1>>"
## CHECK: successors "<<BTry2>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BExitTry1>>"
## CHECK: predecessors "<<BTry1>>"
-## CHECK: successors "<<BCatch>>"
+## CHECK: successors "<<BSplit2:B\d+>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
@@ -957,6 +1079,16 @@
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit1>>"
+## CHECK: predecessors "<<BSplit2>>" "<<BCatch>>"
+## CHECK: successors "<<BEnterTry2>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit2>>"
+## CHECK: predecessors "<<BExitTry1>>"
+## CHECK: successors "<<BSplit1>>"
+## CHECK: Goto
+
.method public static testHandlerEdge1(III)I
.registers 4
@@ -977,16 +1109,16 @@
## CHECK-START: int Builder.testHandlerEdge2(int, int, int) builder (after)
## CHECK: name "B0"
-## CHECK: successors "<<BCatch1:B\d+>>"
+## CHECK: successors "<<BSplit4:B\d+>>"
-## CHECK: name "<<BCatch1>>"
-## CHECK: predecessors "B0" "<<BEnterTry2:B\d+>>" "<<BExitTry2:B\d+>>"
-## CHECK: successors "<<BEnterTry1:B\d+>>"
+## CHECK: name "<<BCatch1:B\d+>>"
+## CHECK: predecessors "<<BEnterTry2:B\d+>>" "<<BExitTry2:B\d+>>"
+## CHECK: successors "<<BSplit1:B\d+>>"
## CHECK: flags "catch_block"
## CHECK: name "<<BCatch2:B\d+>>"
-## CHECK: predecessors "<<BExitTry1:B\d+>>" "<<BEnterTry1>>" "<<BExitTry1>>"
-## CHECK: successors "<<BEnterTry2>>"
+## CHECK: predecessors "<<BEnterTry1:B\d+>>" "<<BExitTry1:B\d+>>"
+## CHECK: successors "<<BSplit2:B\d+>>"
## CHECK: flags "catch_block"
## CHECK: name "<<BReturn:B\d+>>"
@@ -995,6 +1127,7 @@
## CHECK: Return
## CHECK: name "<<BExit>>"
+## CHECK: Exit
## CHECK: name "<<BTry1:B\d+>>"
## CHECK: predecessors "<<BEnterTry1>>"
@@ -1007,20 +1140,20 @@
## CHECK: Div
## CHECK: name "<<BEnterTry1>>"
-## CHECK: predecessors "<<BCatch1>>"
+## CHECK: predecessors "<<BSplit1>>"
## CHECK: successors "<<BTry1>>"
## CHECK: xhandlers "<<BCatch2>>"
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BEnterTry2>>"
-## CHECK: predecessors "<<BCatch2>>"
+## CHECK: predecessors "<<BSplit2>>"
## CHECK: successors "<<BTry2>>"
## CHECK: xhandlers "<<BCatch1>>"
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BExitTry1>>"
## CHECK: predecessors "<<BTry1>>"
-## CHECK: successors "<<BCatch2>>"
+## CHECK: successors "<<BSplit3:B\d+>>"
## CHECK: xhandlers "<<BCatch2>>"
## CHECK: TryBoundary kind:exit
@@ -1030,6 +1163,26 @@
## CHECK: xhandlers "<<BCatch1>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit1>>"
+## CHECK: predecessors "<<BSplit4>>" "<<BCatch1>>"
+## CHECK: successors "<<BEnterTry1>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit2>>"
+## CHECK: predecessors "<<BCatch2>>" "<<BSplit3>>"
+## CHECK: successors "<<BEnterTry2>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit3>>"
+## CHECK: predecessors "<<BExitTry1>>"
+## CHECK: successors "<<BSplit2>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit4>>"
+## CHECK: predecessors "B0"
+## CHECK: successors "<<BSplit1>>"
+## CHECK: Goto
+
.method public static testHandlerEdge2(III)I
.registers 4
@@ -1053,10 +1206,10 @@
## CHECK-START: int Builder.testTryInLoop(int, int) builder (after)
## CHECK: name "B0"
-## CHECK: successors "<<BEnterTry:B\d+>>"
+## CHECK: successors "<<BSplit1:B\d+>>"
## CHECK: name "<<BTry:B\d+>>"
-## CHECK: predecessors "<<BEnterTry>>"
+## CHECK: predecessors "<<BEnterTry:B\d+>>"
## CHECK: successors "<<BExitTry:B\d+>>"
## CHECK: Div
@@ -1065,22 +1218,28 @@
## CHECK: successors "<<BEnterTry>>"
## CHECK: flags "catch_block"
-## CHECK: name "<<BExit:B\d+>>"
-## CHECK-NOT: predecessors "{{B\d+}}"
-## CHECK: end_block
-
## CHECK: name "<<BEnterTry>>"
-## CHECK: predecessors "B0"
+## CHECK: predecessors "<<BSplit1>>"
## CHECK: successors "<<BTry>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:entry
## CHECK: name "<<BExitTry>>"
## CHECK: predecessors "<<BTry>>"
-## CHECK: successors "<<BEnterTry>>"
+## CHECK: successors "<<BSplit2:B\d+>>"
## CHECK: xhandlers "<<BCatch>>"
## CHECK: TryBoundary kind:exit
+## CHECK: name "<<BSplit1>>"
+## CHECK: predecessors "B0"
+## CHECK: successors "<<BEnterTry>>"
+## CHECK: Goto
+
+## CHECK: name "<<BSplit2>>"
+## CHECK: predecessors "<<BExitTry>>"
+## CHECK: successors "<<BEnterTry>>"
+## CHECK: Goto
+
.method public static testTryInLoop(II)I
.registers 3
@@ -1098,9 +1257,10 @@
# INVOKE it follows, even if there is a try boundary between them.
## CHECK-START: int Builder.testMoveResult_Invoke(int, int, int) builder (after)
-
-## CHECK: <<Res:i\d+>> InvokeStaticOrDirect
-## CHECK-NEXT: StoreLocal [v0,<<Res>>]
+## CHECK-DAG: <<M1:i\d+>> IntConstant -1
+## CHECK-DAG: <<Res:i\d+>> InvokeStaticOrDirect
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<Res>>,<<M1>>]
+## CHECK-DAG: Return [<<Phi>>]
.method public static testMoveResult_Invoke(III)I
.registers 3
@@ -1124,16 +1284,16 @@
# FILLED_NEW_ARRAY it follows, even if there is a try boundary between them.
## CHECK-START: int[] Builder.testMoveResult_FilledNewArray(int, int, int) builder (after)
-
-## CHECK: <<Res:l\d+>> NewArray
-## CHECK-NEXT: Temporary
-## CHECK-NEXT: <<Local1:i\d+>> LoadLocal [v0]
-## CHECK-NEXT: ArraySet [<<Res>>,{{i\d+}},<<Local1>>]
-## CHECK-NEXT: <<Local2:i\d+>> LoadLocal [v1]
-## CHECK-NEXT: ArraySet [<<Res>>,{{i\d+}},<<Local2>>]
-## CHECK-NEXT: <<Local3:i\d+>> LoadLocal [v2]
-## CHECK-NEXT: ArraySet [<<Res>>,{{i\d+}},<<Local3>>]
-## CHECK-NEXT: StoreLocal [v0,<<Res>>]
+## CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+## CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+## CHECK-DAG: <<Arg3:i\d+>> ParameterValue
+## CHECK-DAG: <<Null:l\d+>> NullConstant
+## CHECK-DAG: <<Res:l\d+>> NewArray
+## CHECK-DAG: ArraySet [<<Res>>,{{i\d+}},<<Arg1>>]
+## CHECK-DAG: ArraySet [<<Res>>,{{i\d+}},<<Arg2>>]
+## CHECK-DAG: ArraySet [<<Res>>,{{i\d+}},<<Arg3>>]
+## CHECK-DAG: <<Phi:l\d+>> Phi [<<Res>>,<<Null>>]
+## CHECK-DAG: Return [<<Phi>>]
.method public static testMoveResult_FilledNewArray(III)[I
.registers 3
diff --git a/test/510-checker-try-catch/smali/SsaBuilder.smali b/test/510-checker-try-catch/smali/SsaBuilder.smali
index a6a5bfebee..1fd5fb2dd6 100644
--- a/test/510-checker-try-catch/smali/SsaBuilder.smali
+++ b/test/510-checker-try-catch/smali/SsaBuilder.smali
@@ -19,7 +19,7 @@
# Tests that catch blocks with both normal and exceptional predecessors are
# split in two.
-## CHECK-START: int SsaBuilder.testSimplifyCatchBlock(int, int, int) ssa_builder (after)
+## CHECK-START: int SsaBuilder.testSimplifyCatchBlock(int, int, int) builder (after)
## CHECK: name "B1"
## CHECK-NEXT: from_bci
@@ -62,7 +62,7 @@
# Should be rejected because :catch_all is a loop header.
-## CHECK-START: int SsaBuilder.testCatchLoopHeader(int, int, int) ssa_builder (after, bad_state)
+## CHECK-START: int SsaBuilder.testCatchLoopHeader(int, int, int) builder (after, bad_state)
.method public static testCatchLoopHeader(III)I
.registers 4
@@ -84,7 +84,7 @@
# Tests creation of catch Phis.
-## CHECK-START: int SsaBuilder.testPhiCreation(int, int, int) ssa_builder (after)
+## CHECK-START: int SsaBuilder.testPhiCreation(int, int, int) builder (after)
## CHECK-DAG: <<P0:i\d+>> ParameterValue
## CHECK-DAG: <<P1:i\d+>> ParameterValue
## CHECK-DAG: <<P2:i\d+>> ParameterValue
@@ -127,7 +127,7 @@
# Tests that phi elimination does not remove catch phis where the value does
# not dominate the phi.
-## CHECK-START: int SsaBuilder.testPhiElimination_Domination(int, int) ssa_builder (after)
+## CHECK-START: int SsaBuilder.testPhiElimination_Domination(int, int) builder (after)
## CHECK-DAG: <<P0:i\d+>> ParameterValue
## CHECK-DAG: <<P1:i\d+>> ParameterValue
## CHECK-DAG: <<Cst5:i\d+>> IntConstant 5
@@ -168,7 +168,7 @@
# Tests that phi elimination loops until no more phis can be removed.
-## CHECK-START: int SsaBuilder.testPhiElimination_Dependencies(int, int, int) ssa_builder (after)
+## CHECK-START: int SsaBuilder.testPhiElimination_Dependencies(int, int, int) builder (after)
## CHECK-NOT: Phi
.method public static testPhiElimination_Dependencies(III)I
@@ -200,10 +200,7 @@
# Tests that dead catch blocks are removed.
-## CHECK-START: int SsaBuilder.testDeadCatchBlock(int, int, int) ssa_builder (before)
-## CHECK: Mul
-
-## CHECK-START: int SsaBuilder.testDeadCatchBlock(int, int, int) ssa_builder (after)
+## CHECK-START: int SsaBuilder.testDeadCatchBlock(int, int, int) builder (after)
## CHECK-DAG: <<P0:i\d+>> ParameterValue
## CHECK-DAG: <<P1:i\d+>> ParameterValue
## CHECK-DAG: <<P2:i\d+>> ParameterValue
@@ -211,7 +208,7 @@
## CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<P2>>]
## CHECK-DAG: Return [<<Add2>>]
-## CHECK-START: int SsaBuilder.testDeadCatchBlock(int, int, int) ssa_builder (after)
+## CHECK-START: int SsaBuilder.testDeadCatchBlock(int, int, int) builder (after)
## CHECK-NOT: flags "catch_block"
## CHECK-NOT: Mul
diff --git a/test/517-checker-builder-fallthrough/smali/TestCase.smali b/test/517-checker-builder-fallthrough/smali/TestCase.smali
index bc9502b31a..946f169948 100644
--- a/test/517-checker-builder-fallthrough/smali/TestCase.smali
+++ b/test/517-checker-builder-fallthrough/smali/TestCase.smali
@@ -25,8 +25,8 @@
## CHECK: name "B1"
## CHECK: successors "B5" "B2"
-## CHECK: StoreLocal [v0,<<Const0>>]
-## CHECK: If
+## CHECK: <<Cond:z\d+>> Equal [<<Const0>>,<<Const0>>]
+## CHECK: If [<<Cond>>]
## CHECK: name "B2"
## CHECK: successors "B4"
diff --git a/test/523-checker-can-throw-regression/smali/Test.smali b/test/523-checker-can-throw-regression/smali/Test.smali
index 87192ea123..4b737a9057 100644
--- a/test/523-checker-can-throw-regression/smali/Test.smali
+++ b/test/523-checker-can-throw-regression/smali/Test.smali
@@ -46,8 +46,10 @@
div-int/2addr p0, p1
:else
div-int/2addr p0, p2
- return p0
:try_end_2
- .catchall {:try_start_2 .. :try_end_2} :catchall
+ .catchall {:try_start_2 .. :try_end_2} :catchall2
+
+ :catchall2
+ return p0
.end method
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index d647683869..4d6ea06fe0 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -664,28 +664,19 @@ public class Main {
System.out.println("testFinalizableByForcingGc() failed to force gc.");
}
- /// CHECK-START: int Main.testHSelect(boolean) load_store_elimination (before)
+ /// CHECK-START: int Main.$noinline$testHSelect(boolean) load_store_elimination (before)
/// CHECK: InstanceFieldSet
/// CHECK: Select
- /// CHECK-START: int Main.testHSelect(boolean) load_store_elimination (after)
+ /// CHECK-START: int Main.$noinline$testHSelect(boolean) load_store_elimination (after)
/// CHECK: InstanceFieldSet
/// CHECK: Select
// Test that HSelect creates alias.
- public static int testHSelect(boolean b) {
- // Disable inlining.
- System.out.print("");
- System.out.print("");
- System.out.print("");
- System.out.print("");
- System.out.print("");
- System.out.print("");
- System.out.print("");
- System.out.print("");
- System.out.print("");
- System.out.print("");
-
+ public static int $noinline$testHSelect(boolean b) {
+ if (sFlag) {
+ throw new Error();
+ }
TestClass obj = new TestClass();
TestClass obj2 = null;
obj.i = 0xdead;
@@ -754,6 +745,8 @@ public class Main {
assertIntEquals(test23(false), 5);
assertFloatEquals(test24(), 8.0f);
testFinalizableByForcingGc();
- assertIntEquals(testHSelect(true), 0xdead);
+ assertIntEquals($noinline$testHSelect(true), 0xdead);
}
+
+ static boolean sFlag;
}
diff --git a/test/537-checker-debuggable/smali/TestCase.smali b/test/537-checker-debuggable/smali/TestCase.smali
index 8e6c7ef727..5714d3aeae 100644
--- a/test/537-checker-debuggable/smali/TestCase.smali
+++ b/test/537-checker-debuggable/smali/TestCase.smali
@@ -20,10 +20,10 @@
# be eliminated in normal mode but kept live in debuggable mode. Test that
# Checker runs the correct test for each compilation mode.
-## CHECK-START: int TestCase.deadPhi(int, int, int) ssa_builder (after)
+## CHECK-START: int TestCase.deadPhi(int, int, int) builder (after)
## CHECK-NOT: Phi
-## CHECK-START-DEBUGGABLE: int TestCase.deadPhi(int, int, int) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: int TestCase.deadPhi(int, int, int) builder (after)
## CHECK: Phi
.method public static deadPhi(III)I
diff --git a/test/540-checker-rtp-bug/src/Main.java b/test/540-checker-rtp-bug/src/Main.java
index 9a9f0b6048..17b11db295 100644
--- a/test/540-checker-rtp-bug/src/Main.java
+++ b/test/540-checker-rtp-bug/src/Main.java
@@ -21,7 +21,7 @@ final class Final {
}
public class Main {
- /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) ssa_builder (after)
+ /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: <<Class:l\d+>> LoadClass
/// CHECK: CheckCast [<<Phi>>,<<Class>>]
@@ -43,7 +43,7 @@ public class Main {
return (Final) x;
}
- /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) ssa_builder (after)
+ /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: <<Class:l\d+>> LoadClass
/// CHECK: InstanceOf [<<Phi>>,<<Class>>]
@@ -65,7 +65,7 @@ public class Main {
}
}
- /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) ssa_builder (after)
+ /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: <<NC:l\d+>> NullCheck [<<Phi>>]
/// CHECK: <<Ret:l\d+>> InvokeVirtual [<<NC>>] method_name:java.lang.Object.toString
diff --git a/test/549-checker-types-merge/src/Main.java b/test/549-checker-types-merge/src/Main.java
index 917073b1c9..51af3cff10 100644
--- a/test/549-checker-types-merge/src/Main.java
+++ b/test/549-checker-types-merge/src/Main.java
@@ -38,14 +38,14 @@ class ClassImplementsInterfaceA extends ClassSuper implements InterfaceA {}
public class Main {
- /// CHECK-START: java.lang.Object Main.testMergeNullContant(boolean) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeNullContant(boolean) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:Main
/// CHECK: Return [<<Phi>>]
private Object testMergeNullContant(boolean cond) {
return cond ? null : new Main();
}
- /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassExtendsA, ClassExtendsB) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassExtendsA, ClassExtendsB) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:ClassSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeClasses(boolean cond, ClassExtendsA a, ClassExtendsB b) {
@@ -53,7 +53,7 @@ public class Main {
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassExtendsA, ClassSuper) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassExtendsA, ClassSuper) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:ClassSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeClasses(boolean cond, ClassExtendsA a, ClassSuper b) {
@@ -61,7 +61,7 @@ public class Main {
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassSuper, ClassSuper) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassSuper, ClassSuper) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:ClassSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeClasses(boolean cond, ClassSuper a, ClassSuper b) {
@@ -69,7 +69,7 @@ public class Main {
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassOtherSuper, ClassSuper) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClasses(boolean, ClassOtherSuper, ClassSuper) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private Object testMergeClasses(boolean cond, ClassOtherSuper a, ClassSuper b) {
@@ -77,7 +77,7 @@ public class Main {
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClassWithInterface(boolean, ClassImplementsInterfaceA, InterfaceSuper) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClassWithInterface(boolean, ClassImplementsInterfaceA, InterfaceSuper) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:InterfaceSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeClassWithInterface(boolean cond, ClassImplementsInterfaceA a, InterfaceSuper b) {
@@ -85,7 +85,7 @@ public class Main {
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeClassWithInterface(boolean, ClassSuper, InterfaceSuper) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeClassWithInterface(boolean, ClassSuper, InterfaceSuper) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private Object testMergeClassWithInterface(boolean cond, ClassSuper a, InterfaceSuper b) {
@@ -93,7 +93,7 @@ public class Main {
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceExtendsA, InterfaceSuper) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceExtendsA, InterfaceSuper) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:InterfaceSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeInterfaces(boolean cond, InterfaceExtendsA a, InterfaceSuper b) {
@@ -101,7 +101,7 @@ public class Main {
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceSuper, InterfaceSuper) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceSuper, InterfaceSuper) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:InterfaceSuper
/// CHECK: Return [<<Phi>>]
private Object testMergeInterfaces(boolean cond, InterfaceSuper a, InterfaceSuper b) {
@@ -109,7 +109,7 @@ public class Main {
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceExtendsA, InterfaceExtendsB) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceExtendsA, InterfaceExtendsB) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private Object testMergeInterfaces(boolean cond, InterfaceExtendsA a, InterfaceExtendsB b) {
@@ -117,7 +117,7 @@ public class Main {
return cond ? a : b;
}
- /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceSuper, InterfaceOtherSuper) ssa_builder (after)
+ /// CHECK-START: java.lang.Object Main.testMergeInterfaces(boolean, InterfaceSuper, InterfaceOtherSuper) builder (after)
/// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
/// CHECK: Return [<<Phi>>]
private Object testMergeInterfaces(boolean cond, InterfaceSuper a, InterfaceOtherSuper b) {
diff --git a/test/550-checker-regression-wide-store/smali/TestCase.smali b/test/550-checker-regression-wide-store/smali/TestCase.smali
index 7974d56a8f..9133c82365 100644
--- a/test/550-checker-regression-wide-store/smali/TestCase.smali
+++ b/test/550-checker-regression-wide-store/smali/TestCase.smali
@@ -25,7 +25,7 @@
# Test storing into the high vreg of a wide pair. This scenario has runtime
# behaviour implications so we run it from Main.main.
-## CHECK-START: int TestCase.invalidateLow(long) ssa_builder (after)
+## CHECK-START: int TestCase.invalidateLow(long) builder (after)
## CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
## CHECK-DAG: <<Arg:j\d+>> ParameterValue
## CHECK-DAG: <<Cast:i\d+>> TypeConversion [<<Arg>>]
@@ -53,7 +53,7 @@
# Test that storing a wide invalidates the value in the high vreg. This
# cannot be detected from runtime so we only test the environment with Checker.
-## CHECK-START: void TestCase.invalidateHigh1(long) ssa_builder (after)
+## CHECK-START: void TestCase.invalidateHigh1(long) builder (after)
## CHECK-DAG: <<Arg:j\d+>> ParameterValue
## CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.System.nanoTime env:[[<<Arg>>,_,<<Arg>>,_]]
@@ -67,7 +67,7 @@
.end method
-## CHECK-START: void TestCase.invalidateHigh2(long) ssa_builder (after)
+## CHECK-START: void TestCase.invalidateHigh2(long) builder (after)
## CHECK-DAG: <<Arg:j\d+>> ParameterValue
## CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.System.nanoTime env:[[<<Arg>>,_,_,<<Arg>>,_]]
diff --git a/test/551-checker-shifter-operand/src/Main.java b/test/551-checker-shifter-operand/src/Main.java
index decdd1f324..8d73d69db9 100644
--- a/test/551-checker-shifter-operand/src/Main.java
+++ b/test/551-checker-shifter-operand/src/Main.java
@@ -241,13 +241,14 @@ public class Main {
/// CHECK-START-ARM64: void Main.$opt$validateExtendByteInt1(int, byte) instruction_simplifier_arm64 (after)
/// CHECK: Arm64DataProcWithShifterOp
- /// CHECK: Arm64DataProcWithShifterOp
+ /// CHECK-NOT: Arm64DataProcWithShifterOp
/// CHECK-START-ARM64: void Main.$opt$validateExtendByteInt1(int, byte) instruction_simplifier_arm64 (after)
/// CHECK-NOT: TypeConversion
public static void $opt$validateExtendByteInt1(int a, byte b) {
assertIntEquals(a + $noinline$byteToChar (b), a + (char)b);
+ // Conversions byte->short and short->int are implicit; nothing to merge.
assertIntEquals(a + $noinline$byteToShort(b), a + (short)b);
}
@@ -266,17 +267,24 @@ public class Main {
/// CHECK: Arm64DataProcWithShifterOp
/// CHECK: Arm64DataProcWithShifterOp
/// CHECK: Arm64DataProcWithShifterOp
+ /// CHECK: Arm64DataProcWithShifterOp
+ /// CHECK: Arm64DataProcWithShifterOp
+ /// CHECK-NOT: Arm64DataProcWithShifterOp
/// CHECK-START-ARM64: void Main.$opt$validateExtendByteLong(long, byte) instruction_simplifier_arm64 (after)
/// CHECK: TypeConversion
- /// CHECK: TypeConversion
/// CHECK-NOT: TypeConversion
public static void $opt$validateExtendByteLong(long a, byte b) {
- // The first two tests have a type conversion.
+ // In each of the following tests, there will be a merge on the LHS.
+
+ // The first test has an explicit byte->char conversion on RHS,
+ // followed by a conversion that is merged with the Add.
assertLongEquals(a + $noinline$byteToChar (b), a + (char)b);
+ // Since conversions byte->short and byte->int are implicit, the RHS
+ // for the two tests below is the same and one is eliminated by GVN.
+ // The other is then merged to a shifter operand instruction.
assertLongEquals(a + $noinline$byteToShort(b), a + (short)b);
- // This test does not because the conversion to `int` is optimized away.
assertLongEquals(a + $noinline$byteToInt (b), a + (int)b);
}
diff --git a/test/552-checker-primitive-typeprop/smali/ArrayGet.smali b/test/552-checker-primitive-typeprop/smali/ArrayGet.smali
index 042fa0c80c..de3229064c 100644
--- a/test/552-checker-primitive-typeprop/smali/ArrayGet.smali
+++ b/test/552-checker-primitive-typeprop/smali/ArrayGet.smali
@@ -19,10 +19,10 @@
# Test phi with fixed-type ArrayGet as an input and a matching second input.
# The phi should be typed accordingly.
-## CHECK-START: void ArrayGet.matchingFixedType(float[], float) ssa_builder (after)
+## CHECK-START: void ArrayGet.matchingFixedType(float[], float) builder (after)
## CHECK-NOT: Phi
-## CHECK-START-DEBUGGABLE: void ArrayGet.matchingFixedType(float[], float) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void ArrayGet.matchingFixedType(float[], float) builder (after)
## CHECK-DAG: <<Arg1:f\d+>> ParameterValue
## CHECK-DAG: <<Aget:f\d+>> ArrayGet
## CHECK-DAG: {{f\d+}} Phi [<<Aget>>,<<Arg1>>] reg:0
@@ -49,10 +49,10 @@
# Test phi with fixed-type ArrayGet as an input and a conflicting second input.
# The phi should be eliminated due to the conflict.
-## CHECK-START: void ArrayGet.conflictingFixedType(float[], int) ssa_builder (after)
+## CHECK-START: void ArrayGet.conflictingFixedType(float[], int) builder (after)
## CHECK-NOT: Phi
-## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFixedType(float[], int) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFixedType(float[], int) builder (after)
## CHECK-NOT: Phi
.method public static conflictingFixedType([FI)V
.registers 8
@@ -76,13 +76,13 @@
# Same test as the one above, only this time tests that type of ArrayGet is not
# changed.
-## CHECK-START: void ArrayGet.conflictingFixedType2(int[], float) ssa_builder (after)
+## CHECK-START: void ArrayGet.conflictingFixedType2(int[], float) builder (after)
## CHECK-NOT: Phi
-## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFixedType2(int[], float) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFixedType2(int[], float) builder (after)
## CHECK-NOT: Phi
-## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFixedType2(int[], float) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFixedType2(int[], float) builder (after)
## CHECK: {{i\d+}} ArrayGet
.method public static conflictingFixedType2([IF)V
.registers 8
@@ -107,10 +107,10 @@
# Test phi with free-type ArrayGet as an input and a matching second input.
# The phi should be typed accordingly.
-## CHECK-START: void ArrayGet.matchingFreeType(float[], float) ssa_builder (after)
+## CHECK-START: void ArrayGet.matchingFreeType(float[], float) builder (after)
## CHECK-NOT: Phi
-## CHECK-START-DEBUGGABLE: void ArrayGet.matchingFreeType(float[], float) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void ArrayGet.matchingFreeType(float[], float) builder (after)
## CHECK-DAG: <<Arg1:f\d+>> ParameterValue
## CHECK-DAG: <<Aget:f\d+>> ArrayGet
## CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Aget>>]
@@ -139,10 +139,10 @@
# The phi will be kept and typed according to the second input despite the
# conflict.
-## CHECK-START: void ArrayGet.conflictingFreeType(int[], float) ssa_builder (after)
+## CHECK-START: void ArrayGet.conflictingFreeType(int[], float) builder (after)
## CHECK-NOT: Phi
-## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFreeType(int[], float) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void ArrayGet.conflictingFreeType(int[], float) builder (after)
## CHECK-NOT: Phi
.method public static conflictingFreeType([IF)V
@@ -169,7 +169,7 @@
# case uses ArrayGet indirectly through two phis. It also creates an unused
# conflicting phi which should not be preserved.
-## CHECK-START: void ArrayGet.conflictingPhiUses(int[], float, boolean, boolean, boolean) ssa_builder (after)
+## CHECK-START: void ArrayGet.conflictingPhiUses(int[], float, boolean, boolean, boolean) builder (after)
## CHECK: InvokeStaticOrDirect env:[[{{i\d+}},{{i\d+}},_,{{i\d+}},{{.*}}
.method public static conflictingPhiUses([IFZZZ)V
@@ -209,10 +209,10 @@
# another. The situation needs to be resolved so that only one instruction
# remains.
-## CHECK-START: void ArrayGet.typedVsUntypedPhiUse(float[], float, boolean, boolean) ssa_builder (after)
+## CHECK-START: void ArrayGet.typedVsUntypedPhiUse(float[], float, boolean, boolean) builder (after)
## CHECK: {{f\d+}} ArrayGet
-## CHECK-START: void ArrayGet.typedVsUntypedPhiUse(float[], float, boolean, boolean) ssa_builder (after)
+## CHECK-START: void ArrayGet.typedVsUntypedPhiUse(float[], float, boolean, boolean) builder (after)
## CHECK-NOT: {{i\d+}} ArrayGet
.method public static typedVsUntypedPhiUse([FFZZ)V
diff --git a/test/552-checker-primitive-typeprop/smali/ArraySet.smali b/test/552-checker-primitive-typeprop/smali/ArraySet.smali
index 57d8606abb..087460aef2 100644
--- a/test/552-checker-primitive-typeprop/smali/ArraySet.smali
+++ b/test/552-checker-primitive-typeprop/smali/ArraySet.smali
@@ -19,7 +19,7 @@
# Note that the input is a Phi to make sure primitive type propagation is re-run
# on the replaced inputs.
-## CHECK-START: void ArraySet.ambiguousSet(int[], float[], boolean) ssa_builder (after)
+## CHECK-START: void ArraySet.ambiguousSet(int[], float[], boolean) builder (after)
## CHECK-DAG: <<IntArray:l\d+>> ParameterValue klass:int[]
## CHECK-DAG: <<IntA:i\d+>> IntConstant 0
## CHECK-DAG: <<IntB:i\d+>> IntConstant 1073741824
diff --git a/test/552-checker-primitive-typeprop/smali/SsaBuilder.smali b/test/552-checker-primitive-typeprop/smali/SsaBuilder.smali
index 395feaaf61..0d067ed1ca 100644
--- a/test/552-checker-primitive-typeprop/smali/SsaBuilder.smali
+++ b/test/552-checker-primitive-typeprop/smali/SsaBuilder.smali
@@ -22,7 +22,7 @@
# otherwise running the code with an array short enough to throw will crash at
# runtime because v0 is undefined.
-## CHECK-START: int SsaBuilder.environmentPhi(boolean, int[]) ssa_builder (after)
+## CHECK-START: int SsaBuilder.environmentPhi(boolean, int[]) builder (after)
## CHECK-DAG: <<Cst0:f\d+>> FloatConstant 0
## CHECK-DAG: <<Cst2:f\d+>> FloatConstant 2
## CHECK-DAG: <<Phi:f\d+>> Phi [<<Cst0>>,<<Cst2>>]
diff --git a/test/552-checker-primitive-typeprop/smali/TypePropagation.smali b/test/552-checker-primitive-typeprop/smali/TypePropagation.smali
index 58682a1923..d34e43e160 100644
--- a/test/552-checker-primitive-typeprop/smali/TypePropagation.smali
+++ b/test/552-checker-primitive-typeprop/smali/TypePropagation.smali
@@ -15,7 +15,7 @@
.class public LTypePropagation;
.super Ljava/lang/Object;
-## CHECK-START-DEBUGGABLE: void TypePropagation.mergeDeadPhi(boolean, boolean, int, float, float) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeDeadPhi(boolean, boolean, int, float, float) builder (after)
## CHECK-NOT: Phi
.method public static mergeDeadPhi(ZZIFF)V
.registers 8
@@ -34,7 +34,7 @@
return-void
.end method
-## CHECK-START-DEBUGGABLE: void TypePropagation.mergeSameType(boolean, int, int) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeSameType(boolean, int, int) builder (after)
## CHECK: {{i\d+}} Phi
## CHECK-NOT: Phi
.method public static mergeSameType(ZII)V
@@ -47,7 +47,7 @@
return-void
.end method
-## CHECK-START-DEBUGGABLE: void TypePropagation.mergeVoidInput(boolean, boolean, int, int) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeVoidInput(boolean, boolean, int, int) builder (after)
## CHECK: {{i\d+}} Phi
## CHECK: {{i\d+}} Phi
## CHECK-NOT: Phi
@@ -64,7 +64,7 @@
return-void
.end method
-## CHECK-START-DEBUGGABLE: void TypePropagation.mergeDifferentSize(boolean, int, long) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeDifferentSize(boolean, int, long) builder (after)
## CHECK-NOT: Phi
.method public static mergeDifferentSize(ZIJ)V
.registers 8
@@ -76,7 +76,7 @@
return-void
.end method
-## CHECK-START-DEBUGGABLE: void TypePropagation.mergeRefFloat(boolean, float, java.lang.Object) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeRefFloat(boolean, float, java.lang.Object) builder (after)
## CHECK-NOT: Phi
.method public static mergeRefFloat(ZFLjava/lang/Object;)V
.registers 8
@@ -88,7 +88,7 @@
return-void
.end method
-## CHECK-START-DEBUGGABLE: void TypePropagation.mergeIntFloat_Success(boolean, float) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeIntFloat_Success(boolean, float) builder (after)
## CHECK: {{f\d+}} Phi
## CHECK-NOT: Phi
.method public static mergeIntFloat_Success(ZF)V
@@ -101,7 +101,7 @@
return-void
.end method
-## CHECK-START-DEBUGGABLE: void TypePropagation.mergeIntFloat_Fail(boolean, int, float) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void TypePropagation.mergeIntFloat_Fail(boolean, int, float) builder (after)
## CHECK-NOT: Phi
.method public static mergeIntFloat_Fail(ZIF)V
.registers 8
@@ -113,7 +113,7 @@
return-void
.end method
-## CHECK-START-DEBUGGABLE: void TypePropagation.updateAllUsersOnConflict(boolean, boolean, int, float, int) ssa_builder (after)
+## CHECK-START-DEBUGGABLE: void TypePropagation.updateAllUsersOnConflict(boolean, boolean, int, float, int) builder (after)
## CHECK-NOT: Phi
.method public static updateAllUsersOnConflict(ZZIFI)V
.registers 8
diff --git a/test/554-checker-rtp-checkcast/src/Main.java b/test/554-checker-rtp-checkcast/src/Main.java
index 607f71afb5..5bf766ff4e 100644
--- a/test/554-checker-rtp-checkcast/src/Main.java
+++ b/test/554-checker-rtp-checkcast/src/Main.java
@@ -19,7 +19,7 @@ public class Main {
public static Object returnIntArray() { return new int[10]; }
- /// CHECK-START: void Main.boundTypeForMergingPhi() ssa_builder (after)
+ /// CHECK-START: void Main.boundTypeForMergingPhi() builder (after)
/// CHECK-DAG: ArraySet [<<NC:l\d+>>,{{i\d+}},{{i\d+}}]
/// CHECK-DAG: <<NC>> NullCheck [<<Phi:l\d+>>]
/// CHECK-DAG: <<Phi>> Phi klass:int[]
@@ -32,7 +32,7 @@ public class Main {
array[0] = 14;
}
- /// CHECK-START: void Main.boundTypeForLoopPhi() ssa_builder (after)
+ /// CHECK-START: void Main.boundTypeForLoopPhi() builder (after)
/// CHECK-DAG: ArraySet [<<NC:l\d+>>,{{i\d+}},{{i\d+}}]
/// CHECK-DAG: <<NC>> NullCheck [<<Phi:l\d+>>]
/// CHECK-DAG: <<Phi>> Phi klass:int[]
@@ -50,7 +50,7 @@ public class Main {
array[0] = 14;
}
- /// CHECK-START: void Main.boundTypeForCatchPhi() ssa_builder (after)
+ /// CHECK-START: void Main.boundTypeForCatchPhi() builder (after)
/// CHECK-DAG: ArraySet [<<NC:l\d+>>,{{i\d+}},{{i\d+}}]
/// CHECK-DAG: <<NC>> NullCheck [<<Phi:l\d+>>]
/// CHECK-DAG: <<Phi>> Phi is_catch_phi:true klass:int[]
diff --git a/test/557-checker-ref-equivalent/smali/TestCase.smali b/test/557-checker-ref-equivalent/smali/TestCase.smali
index 24729572c4..1347554aad 100644
--- a/test/557-checker-ref-equivalent/smali/TestCase.smali
+++ b/test/557-checker-ref-equivalent/smali/TestCase.smali
@@ -16,7 +16,7 @@
.super Ljava/lang/Object;
-## CHECK-START: void TestCase.testIntRefEquivalent() ssa_builder (after)
+## CHECK-START: void TestCase.testIntRefEquivalent() builder (after)
## CHECK-NOT: Phi
.method public static testIntRefEquivalent()V
.registers 4
diff --git a/test/557-checker-ref-equivalent/src/Main.java b/test/557-checker-ref-equivalent/src/Main.java
index a970af5cdf..9323757d92 100644
--- a/test/557-checker-ref-equivalent/src/Main.java
+++ b/test/557-checker-ref-equivalent/src/Main.java
@@ -16,7 +16,7 @@
public class Main {
- /// CHECK-START: void Main.testRedundantPhiCycle(boolean) ssa_builder (after)
+ /// CHECK-START: void Main.testRedundantPhiCycle(boolean) builder (after)
/// CHECK-NOT: Phi
private void testRedundantPhiCycle(boolean cond) {
Object o = null;
@@ -28,7 +28,7 @@ public class Main {
}
}
- /// CHECK-START: void Main.testLoopPhisWithNullAndCrossUses(boolean) ssa_builder (after)
+ /// CHECK-START: void Main.testLoopPhisWithNullAndCrossUses(boolean) builder (after)
/// CHECK-NOT: Phi
private void testLoopPhisWithNullAndCrossUses(boolean cond) {
Main a = null;
diff --git a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
index 971ad84241..7ce60a304b 100644
--- a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
+++ b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali
@@ -323,7 +323,7 @@
# - / \-
# irreducible_loop_back_edge loop_within_back_edge
#
-## CHECK-START: void IrreducibleLoop.analyze1(int) ssa_builder (after)
+## CHECK-START: void IrreducibleLoop.analyze1(int) builder (after)
## CHECK-DAG: Goto loop:<<OuterLoop:B\d+>> outer_loop:none irreducible:true
## CHECK-DAG: Goto outer_loop:<<OuterLoop>> irreducible:false
.method public static analyze1(I)V
@@ -371,7 +371,7 @@
# exit \- /
# irreducible_loop_body
#
-## CHECK-START: void IrreducibleLoop.analyze2(int) ssa_builder (after)
+## CHECK-START: void IrreducibleLoop.analyze2(int) builder (after)
## CHECK-DAG: Goto outer_loop:none irreducible:false
## CHECK-DAG: Goto outer_loop:none irreducible:true
.method public static analyze2(I)V
@@ -418,7 +418,7 @@
# |
# exit
#
-## CHECK-START: void IrreducibleLoop.analyze3(int) ssa_builder (after)
+## CHECK-START: void IrreducibleLoop.analyze3(int) builder (after)
## CHECK-DAG: Goto loop:<<OuterLoop:B\d+>> outer_loop:none irreducible:true
## CHECK-DAG: Goto outer_loop:<<OuterLoop>> irreducible:true
.method public static analyze3(I)V
@@ -467,7 +467,7 @@
# |
# exit
#
-## CHECK-START: void IrreducibleLoop.analyze4(int) ssa_builder (after)
+## CHECK-START: void IrreducibleLoop.analyze4(int) builder (after)
## CHECK-DAG: Goto loop:<<OuterLoop:B\d+>> outer_loop:none irreducible:true
## CHECK-DAG: Goto outer_loop:<<OuterLoop>> irreducible:true
.method public static analyze4(I)V
@@ -519,7 +519,7 @@
# |
# exit
#
-## CHECK-START: void IrreducibleLoop.analyze5(int) ssa_builder (after)
+## CHECK-START: void IrreducibleLoop.analyze5(int) builder (after)
## CHECK-DAG: Goto loop:<<OuterLoop:B\d+>> outer_loop:none irreducible:true
## CHECK-DAG: Goto outer_loop:<<OuterLoop>> irreducible:true
.method public static analyze5(I)V
diff --git a/test/559-checker-rtp-ifnotnull/src/Main.java b/test/559-checker-rtp-ifnotnull/src/Main.java
index 8f401292da..2dc5666e18 100644
--- a/test/559-checker-rtp-ifnotnull/src/Main.java
+++ b/test/559-checker-rtp-ifnotnull/src/Main.java
@@ -17,7 +17,7 @@
public class Main {
- /// CHECK-START: void Main.boundTypeForIfNotNull() ssa_builder (after)
+ /// CHECK-START: void Main.boundTypeForIfNotNull() builder (after)
/// CHECK-DAG: <<Method:(i|j)\d+>> CurrentMethod
/// CHECK-DAG: <<Null:l\d+>> NullConstant
/// CHECK-DAG: <<Cst5:i\d+>> IntConstant 5
diff --git a/test/569-checker-pattern-replacement/src-multidex/Base.java b/test/569-checker-pattern-replacement/src-multidex/Base.java
new file mode 100644
index 0000000000..f4d59af55d
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/Base.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Base {
+ Base() {
+ intField = 0; // Unnecessary IPUT.
+ doubleField = 0.0; // Unnecessary IPUT.
+ objectField = null; // Unnecessary IPUT.
+ }
+
+ Base(int intValue) {
+ intField = intValue;
+ }
+
+ Base(String stringValue) {
+ objectField = stringValue; // Unnecessary IPUT.
+ stringField = stringValue;
+ objectField = null; // Unnecessary IPUT.
+ }
+
+ Base(double doubleValue, Object objectValue) {
+ doubleField = doubleValue;
+ objectField = objectValue;
+ }
+
+ Base(int intValue, double doubleValue, Object objectValue) {
+ intField = intValue;
+ doubleField = doubleValue;
+ objectField = objectValue;
+ }
+
+ Base(int intValue, double doubleValue, Object objectValue, String stringValue) {
+ // Outside our limit of 3 IPUTs.
+ intField = intValue;
+ doubleField = doubleValue;
+ objectField = objectValue;
+ stringField = stringValue;
+ }
+
+ Base(double doubleValue) {
+ this(doubleValue, null);
+ }
+
+ Base(Object objectValue) {
+ // Unsupported forwarding of a value after a zero.
+ this(0.0, objectValue);
+ }
+
+ Base(int intValue, long dummy) {
+ this(intValue, 0.0, null);
+ }
+
+ public int intField;
+ public double doubleField;
+ public Object objectField;
+ public String stringField;
+}
diff --git a/test/569-checker-pattern-replacement/src-multidex/BaseWithFinalField.java b/test/569-checker-pattern-replacement/src-multidex/BaseWithFinalField.java
new file mode 100644
index 0000000000..7a1d5914c8
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/BaseWithFinalField.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class BaseWithFinalField {
+ BaseWithFinalField() {
+ intField = 0;
+ }
+
+ BaseWithFinalField(int intValue) {
+ intField = intValue;
+ }
+
+ public final int intField;
+}
diff --git a/test/569-checker-pattern-replacement/src-multidex/Derived.java b/test/569-checker-pattern-replacement/src-multidex/Derived.java
new file mode 100644
index 0000000000..184563fd59
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/Derived.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class Derived extends Base {
+ public Derived() {
+ this(0);
+ }
+
+ public Derived(int intValue) {
+ super(intValue);
+ }
+
+ public Derived(String stringValue) {
+ super(stringValue);
+ stringField = null; // Clear field set by Base.<init>(String).
+ }
+
+ public Derived(double doubleValue) {
+ super(doubleValue, null);
+ }
+
+ public Derived(int intValue, double doubleValue, Object objectValue) {
+ super(intValue, doubleValue, objectValue);
+ objectField = null; // Clear field set by Base.<init>(int, double, Object).
+ intField = 0; // Clear field set by Base.<init>(int, double, Object).
+ }
+
+ Derived(int intValue, double doubleValue, Object objectValue, String stringValue) {
+ super(intValue, doubleValue, objectValue, stringValue);
+ // Clearing fields here doesn't help because the superclass constructor must
+ // satisfy the pattern constraints on its own and it doesn't (it has 4 IPUTs).
+ intField = 0;
+ doubleField = 0.0;
+ objectField = null;
+ stringField = null;
+ }
+
+ public Derived(float floatValue) {
+ super();
+ floatField = floatValue;
+ }
+
+ public Derived(int intValue, double doubleValue, Object objectValue, float floatValue) {
+ super(intValue, doubleValue, objectValue);
+ objectField = null; // Clear field set by Base.<init>(int, double, Object).
+ floatField = floatValue;
+ }
+
+ public float floatField;
+}
diff --git a/test/569-checker-pattern-replacement/src-multidex/DerivedInSecondDex.java b/test/569-checker-pattern-replacement/src-multidex/DerivedInSecondDex.java
new file mode 100644
index 0000000000..50266e8f8d
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/DerivedInSecondDex.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class DerivedInSecondDex extends BaseInMainDex {
+ DerivedInSecondDex() {
+ super();
+ }
+
+ DerivedInSecondDex(int intValue) {
+ // Not matched: Superclass in a different dex file has an IPUT.
+ super(intValue);
+ }
+
+ DerivedInSecondDex(long dummy) {
+ // Matched: Superclass in a different dex file has an IPUT that's pruned because we store 0.
+ super(0);
+ }
+}
diff --git a/test/569-checker-pattern-replacement/src-multidex/DerivedWithFinalField.java b/test/569-checker-pattern-replacement/src-multidex/DerivedWithFinalField.java
new file mode 100644
index 0000000000..5b39b8a182
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src-multidex/DerivedWithFinalField.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public final class DerivedWithFinalField extends BaseWithFinalField {
+ DerivedWithFinalField() {
+ this(0);
+ }
+
+ DerivedWithFinalField(int intValue) {
+ super(intValue);
+ doubleField = 0.0;
+ }
+
+ DerivedWithFinalField(double doubleValue) {
+ super(0);
+ doubleField = doubleValue;
+ }
+
+ DerivedWithFinalField(int intValue, double doubleValue) {
+ super(intValue);
+ doubleField = doubleValue;
+ }
+
+ public final double doubleField;
+}
diff --git a/test/569-checker-pattern-replacement/src/BaseInMainDex.java b/test/569-checker-pattern-replacement/src/BaseInMainDex.java
new file mode 100644
index 0000000000..b401540a96
--- /dev/null
+++ b/test/569-checker-pattern-replacement/src/BaseInMainDex.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class BaseInMainDex {
+ BaseInMainDex() {
+ }
+
+ BaseInMainDex(int intValue) {
+ intField = intValue;
+ }
+
+ public int intField;
+}
diff --git a/test/569-checker-pattern-replacement/src/Main.java b/test/569-checker-pattern-replacement/src/Main.java
index e2d451cc86..345e9fd222 100644
--- a/test/569-checker-pattern-replacement/src/Main.java
+++ b/test/569-checker-pattern-replacement/src/Main.java
@@ -15,368 +15,1210 @@
*/
public class Main {
- /// CHECK-START: void Main.staticNop() inliner (before)
- /// CHECK: InvokeStaticOrDirect
+ /// CHECK-START: void Main.staticNop() inliner (before)
+ /// CHECK: InvokeStaticOrDirect
- /// CHECK-START: void Main.staticNop() inliner (after)
- /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-START: void Main.staticNop() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
- public static void staticNop() {
- Second.staticNop(11);
- }
-
- /// CHECK-START: void Main.nop(Second) inliner (before)
- /// CHECK: InvokeVirtual
-
- /// CHECK-START: void Main.nop(Second) inliner (after)
- /// CHECK-NOT: InvokeVirtual
-
- public static void nop(Second s) {
- s.nop();
- }
-
- /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (before)
- /// CHECK-DAG: <<Value:l\d+>> ParameterValue
- /// CHECK-DAG: <<Ignored:i\d+>> IntConstant 77
- /// CHECK-DAG: <<ClinitCk:l\d+>> ClinitCheck
- // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
- /// CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect [<<Ignored>>,<<Value>>{{(,[ij]\d+)?}},<<ClinitCk>>]
- /// CHECK-DAG: Return [<<Invoke>>]
+ public static void staticNop() {
+ Second.staticNop(11);
+ }
- /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (after)
- /// CHECK-DAG: <<Value:l\d+>> ParameterValue
- /// CHECK-DAG: Return [<<Value>>]
+ /// CHECK-START: void Main.nop(Second) inliner (before)
+ /// CHECK: InvokeVirtual
- /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (after)
- /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-START: void Main.nop(Second) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- public static Object staticReturnArg2(String value) {
- return Second.staticReturnArg2(77, value);
- }
+ public static void nop(Second s) {
+ s.nop();
+ }
- /// CHECK-START: long Main.returnArg1(Second, long) inliner (before)
- /// CHECK-DAG: <<Second:l\d+>> ParameterValue
- /// CHECK-DAG: <<Value:j\d+>> ParameterValue
- /// CHECK-DAG: <<NullCk:l\d+>> NullCheck [<<Second>>]
- /// CHECK-DAG: <<Invoke:j\d+>> InvokeVirtual [<<NullCk>>,<<Value>>]
- /// CHECK-DAG: Return [<<Invoke>>]
+ /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (before)
+ /// CHECK-DAG: <<Value:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Ignored:i\d+>> IntConstant 77
+ /// CHECK-DAG: <<ClinitCk:l\d+>> ClinitCheck
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: <<Invoke:l\d+>> InvokeStaticOrDirect [<<Ignored>>,<<Value>>{{(,[ij]\d+)?}},<<ClinitCk>>]
+ /// CHECK-DAG: Return [<<Invoke>>]
- /// CHECK-START: long Main.returnArg1(Second, long) inliner (after)
- /// CHECK-DAG: <<Value:j\d+>> ParameterValue
- /// CHECK-DAG: Return [<<Value>>]
+ /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (after)
+ /// CHECK-DAG: <<Value:l\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Value>>]
- /// CHECK-START: long Main.returnArg1(Second, long) inliner (after)
- /// CHECK-NOT: InvokeVirtual
+ /// CHECK-START: java.lang.Object Main.staticReturnArg2(java.lang.String) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
- public static long returnArg1(Second s, long value) {
- return s.returnArg1(value);
- }
+ public static Object staticReturnArg2(String value) {
+ return Second.staticReturnArg2(77, value);
+ }
- /// CHECK-START: int Main.staticReturn9() inliner (before)
- /// CHECK: {{i\d+}} InvokeStaticOrDirect
+ /// CHECK-START: long Main.returnArg1(Second, long) inliner (before)
+ /// CHECK-DAG: <<Second:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Value:j\d+>> ParameterValue
+ /// CHECK-DAG: <<NullCk:l\d+>> NullCheck [<<Second>>]
+ /// CHECK-DAG: <<Invoke:j\d+>> InvokeVirtual [<<NullCk>>,<<Value>>]
+ /// CHECK-DAG: Return [<<Invoke>>]
- /// CHECK-START: int Main.staticReturn9() inliner (before)
- /// CHECK-NOT: IntConstant 9
+ /// CHECK-START: long Main.returnArg1(Second, long) inliner (after)
+ /// CHECK-DAG: <<Value:j\d+>> ParameterValue
+ /// CHECK-DAG: Return [<<Value>>]
- /// CHECK-START: int Main.staticReturn9() inliner (after)
- /// CHECK-DAG: <<Const9:i\d+>> IntConstant 9
- /// CHECK-DAG: Return [<<Const9>>]
+ /// CHECK-START: long Main.returnArg1(Second, long) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- /// CHECK-START: int Main.staticReturn9() inliner (after)
- /// CHECK-NOT: InvokeStaticOrDirect
+ public static long returnArg1(Second s, long value) {
+ return s.returnArg1(value);
+ }
- public static int staticReturn9() {
- return Second.staticReturn9();
- }
-
- /// CHECK-START: int Main.return7(Second) inliner (before)
- /// CHECK: {{i\d+}} InvokeVirtual
-
- /// CHECK-START: int Main.return7(Second) inliner (before)
- /// CHECK-NOT: IntConstant 7
-
- /// CHECK-START: int Main.return7(Second) inliner (after)
- /// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
- /// CHECK-DAG: Return [<<Const7>>]
-
- /// CHECK-START: int Main.return7(Second) inliner (after)
- /// CHECK-NOT: InvokeVirtual
-
- public static int return7(Second s) {
- return s.return7(null);
- }
+ /// CHECK-START: int Main.staticReturn9() inliner (before)
+ /// CHECK: {{i\d+}} InvokeStaticOrDirect
- /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (before)
- /// CHECK: {{l\d+}} InvokeStaticOrDirect
+ /// CHECK-START: int Main.staticReturn9() inliner (before)
+ /// CHECK-NOT: IntConstant 9
- /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (before)
- /// CHECK-NOT: NullConstant
+ /// CHECK-START: int Main.staticReturn9() inliner (after)
+ /// CHECK-DAG: <<Const9:i\d+>> IntConstant 9
+ /// CHECK-DAG: Return [<<Const9>>]
- /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (after)
- /// CHECK-DAG: <<Null:l\d+>> NullConstant
- /// CHECK-DAG: Return [<<Null>>]
+ /// CHECK-START: int Main.staticReturn9() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
- /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (after)
- /// CHECK-NOT: InvokeStaticOrDirect
+ public static int staticReturn9() {
+ return Second.staticReturn9();
+ }
- public static String staticReturnNull() {
- return Second.staticReturnNull();
- }
+ /// CHECK-START: int Main.return7(Second) inliner (before)
+ /// CHECK: {{i\d+}} InvokeVirtual
- /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (before)
- /// CHECK: {{l\d+}} InvokeVirtual
+ /// CHECK-START: int Main.return7(Second) inliner (before)
+ /// CHECK-NOT: IntConstant 7
- /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (before)
- /// CHECK-NOT: NullConstant
+ /// CHECK-START: int Main.return7(Second) inliner (after)
+ /// CHECK-DAG: <<Const7:i\d+>> IntConstant 7
+ /// CHECK-DAG: Return [<<Const7>>]
- /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (after)
- /// CHECK-DAG: <<Null:l\d+>> NullConstant
- /// CHECK-DAG: Return [<<Null>>]
+ /// CHECK-START: int Main.return7(Second) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (after)
- /// CHECK-NOT: InvokeVirtual
+ public static int return7(Second s) {
+ return s.return7(null);
+ }
- public static Object returnNull(Second s) {
- return s.returnNull();
- }
+ /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (before)
+ /// CHECK: {{l\d+}} InvokeStaticOrDirect
- /// CHECK-START: int Main.getInt(Second) inliner (before)
- /// CHECK: {{i\d+}} InvokeVirtual
+ /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (before)
+ /// CHECK-NOT: NullConstant
- /// CHECK-START: int Main.getInt(Second) inliner (after)
- /// CHECK: {{i\d+}} InstanceFieldGet
+ /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (after)
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: Return [<<Null>>]
- /// CHECK-START: int Main.getInt(Second) inliner (after)
- /// CHECK-NOT: InvokeVirtual
+ /// CHECK-START: java.lang.String Main.staticReturnNull() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
- public static int getInt(Second s) {
- return s.getInstanceIntField();
- }
+ public static String staticReturnNull() {
+ return Second.staticReturnNull();
+ }
- /// CHECK-START: double Main.getDouble(Second) inliner (before)
- /// CHECK: {{d\d+}} InvokeVirtual
+ /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (before)
+ /// CHECK: {{l\d+}} InvokeVirtual
- /// CHECK-START: double Main.getDouble(Second) inliner (after)
- /// CHECK: {{d\d+}} InstanceFieldGet
+ /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (before)
+ /// CHECK-NOT: NullConstant
- /// CHECK-START: double Main.getDouble(Second) inliner (after)
- /// CHECK-NOT: InvokeVirtual
+ /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (after)
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: Return [<<Null>>]
- public static double getDouble(Second s) {
- return s.getInstanceDoubleField(22);
- }
+ /// CHECK-START: java.lang.Object Main.returnNull(Second) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (before)
- /// CHECK: {{l\d+}} InvokeVirtual
+ public static Object returnNull(Second s) {
+ return s.returnNull();
+ }
- /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (after)
- /// CHECK: {{l\d+}} InstanceFieldGet
+ /// CHECK-START: int Main.getInt(Second) inliner (before)
+ /// CHECK: {{i\d+}} InvokeVirtual
- /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (after)
- /// CHECK-NOT: InvokeVirtual
+ /// CHECK-START: int Main.getInt(Second) inliner (after)
+ /// CHECK: {{i\d+}} InstanceFieldGet
- public static Object getObject(Second s) {
- return s.getInstanceObjectField(-1L);
- }
+ /// CHECK-START: int Main.getInt(Second) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- /// CHECK-START: java.lang.String Main.getString(Second) inliner (before)
- /// CHECK: {{l\d+}} InvokeVirtual
+ public static int getInt(Second s) {
+ return s.getInstanceIntField();
+ }
- /// CHECK-START: java.lang.String Main.getString(Second) inliner (after)
- /// CHECK: {{l\d+}} InstanceFieldGet
+ /// CHECK-START: double Main.getDouble(Second) inliner (before)
+ /// CHECK: {{d\d+}} InvokeVirtual
- /// CHECK-START: java.lang.String Main.getString(Second) inliner (after)
- /// CHECK-NOT: InvokeVirtual
+ /// CHECK-START: double Main.getDouble(Second) inliner (after)
+ /// CHECK: {{d\d+}} InstanceFieldGet
- public static String getString(Second s) {
- return s.getInstanceStringField(null, "whatever", 1234L);
- }
+ /// CHECK-START: double Main.getDouble(Second) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- /// CHECK-START: int Main.staticGetInt(Second) inliner (before)
- /// CHECK: {{i\d+}} InvokeStaticOrDirect
+ public static double getDouble(Second s) {
+ return s.getInstanceDoubleField(22);
+ }
- /// CHECK-START: int Main.staticGetInt(Second) inliner (after)
- /// CHECK: {{i\d+}} InvokeStaticOrDirect
+ /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (before)
+ /// CHECK: {{l\d+}} InvokeVirtual
- /// CHECK-START: int Main.staticGetInt(Second) inliner (after)
- /// CHECK-NOT: InstanceFieldGet
+ /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (after)
+ /// CHECK: {{l\d+}} InstanceFieldGet
- public static int staticGetInt(Second s) {
- return Second.staticGetInstanceIntField(s);
- }
+ /// CHECK-START: java.lang.Object Main.getObject(Second) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (before)
- /// CHECK: {{d\d+}} InvokeVirtual
+ public static Object getObject(Second s) {
+ return s.getInstanceObjectField(-1L);
+ }
- /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (after)
- /// CHECK: {{d\d+}} InvokeVirtual
+ /// CHECK-START: java.lang.String Main.getString(Second) inliner (before)
+ /// CHECK: {{l\d+}} InvokeVirtual
- /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (after)
- /// CHECK-NOT: InstanceFieldGet
+ /// CHECK-START: java.lang.String Main.getString(Second) inliner (after)
+ /// CHECK: {{l\d+}} InstanceFieldGet
- public static double getDoubleFromParam(Second s) {
- return s.getInstanceDoubleFieldFromParam(s);
- }
+ /// CHECK-START: java.lang.String Main.getString(Second) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- /// CHECK-START: int Main.getStaticInt(Second) inliner (before)
- /// CHECK: {{i\d+}} InvokeVirtual
+ public static String getString(Second s) {
+ return s.getInstanceStringField(null, "whatever", 1234L);
+ }
- /// CHECK-START: int Main.getStaticInt(Second) inliner (after)
- /// CHECK: {{i\d+}} InvokeVirtual
+ /// CHECK-START: int Main.staticGetInt(Second) inliner (before)
+ /// CHECK: {{i\d+}} InvokeStaticOrDirect
- /// CHECK-START: int Main.getStaticInt(Second) inliner (after)
- /// CHECK-NOT: InstanceFieldGet
- /// CHECK-NOT: StaticFieldGet
+ /// CHECK-START: int Main.staticGetInt(Second) inliner (after)
+ /// CHECK: {{i\d+}} InvokeStaticOrDirect
- public static int getStaticInt(Second s) {
- return s.getStaticIntField();
- }
+ /// CHECK-START: int Main.staticGetInt(Second) inliner (after)
+ /// CHECK-NOT: InstanceFieldGet
- /// CHECK-START: long Main.setLong(Second, long) inliner (before)
- /// CHECK: InvokeVirtual
+ public static int staticGetInt(Second s) {
+ return Second.staticGetInstanceIntField(s);
+ }
- /// CHECK-START: long Main.setLong(Second, long) inliner (after)
- /// CHECK: InstanceFieldSet
+ /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (before)
+ /// CHECK: {{d\d+}} InvokeVirtual
- /// CHECK-START: long Main.setLong(Second, long) inliner (after)
- /// CHECK-NOT: InvokeVirtual
+ /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (after)
+ /// CHECK: {{d\d+}} InvokeVirtual
- public static long setLong(Second s, long value) {
- s.setInstanceLongField(-1, value);
- return s.instanceLongField;
- }
+ /// CHECK-START: double Main.getDoubleFromParam(Second) inliner (after)
+ /// CHECK-NOT: InstanceFieldGet
- /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (before)
- /// CHECK: InvokeVirtual
-
- /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (after)
- /// CHECK-DAG: <<Second:l\d+>> ParameterValue
- /// CHECK-DAG: <<Value:j\d+>> ParameterValue
- /// CHECK-DAG: <<Arg2:i\d+>> ParameterValue
- /// CHECK-DAG: <<NullCk:l\d+>> NullCheck [<<Second>>]
- /// CHECK-DAG: InstanceFieldSet [<<NullCk>>,<<Value>>]
- /// CHECK-DAG: <<NullCk2:l\d+>> NullCheck [<<Second>>]
- /// CHECK-DAG: <<IGet:j\d+>> InstanceFieldGet [<<NullCk2>>]
- /// CHECK-DAG: <<Conv:j\d+>> TypeConversion [<<Arg2>>]
- /// CHECK-DAG: <<Add:j\d+>> Add [<<IGet>>,<<Conv>>]
- /// CHECK-DAG: Return [<<Add>>]
-
- /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (after)
- /// CHECK-NOT: InvokeVirtual
-
- public static long setLongReturnArg2(Second s, long value, int arg2) {
- int result = s.setInstanceLongFieldReturnArg2(value, arg2);
- return s.instanceLongField + result;
- }
+ public static double getDoubleFromParam(Second s) {
+ return s.getInstanceDoubleFieldFromParam(s);
+ }
- /// CHECK-START: long Main.staticSetLong(Second, long) inliner (before)
- /// CHECK: InvokeStaticOrDirect
+ /// CHECK-START: int Main.getStaticInt(Second) inliner (before)
+ /// CHECK: {{i\d+}} InvokeVirtual
- /// CHECK-START: long Main.staticSetLong(Second, long) inliner (after)
- /// CHECK: InvokeStaticOrDirect
+ /// CHECK-START: int Main.getStaticInt(Second) inliner (after)
+ /// CHECK: {{i\d+}} InvokeVirtual
- /// CHECK-START: long Main.staticSetLong(Second, long) inliner (after)
- /// CHECK-NOT: InstanceFieldSet
+ /// CHECK-START: int Main.getStaticInt(Second) inliner (after)
+ /// CHECK-NOT: InstanceFieldGet
+ /// CHECK-NOT: StaticFieldGet
- public static long staticSetLong(Second s, long value) {
- Second.staticSetInstanceLongField(s, value);
- return s.instanceLongField;
- }
+ public static int getStaticInt(Second s) {
+ return s.getStaticIntField();
+ }
- /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (before)
- /// CHECK: InvokeVirtual
+ /// CHECK-START: long Main.setLong(Second, long) inliner (before)
+ /// CHECK: InvokeVirtual
- /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (after)
- /// CHECK: InvokeVirtual
+ /// CHECK-START: long Main.setLong(Second, long) inliner (after)
+ /// CHECK: InstanceFieldSet
- /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (after)
- /// CHECK-NOT: InstanceFieldSet
+ /// CHECK-START: long Main.setLong(Second, long) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- public static long setLongThroughParam(Second s, long value) {
- s.setInstanceLongFieldThroughParam(s, value);
- return s.instanceLongField;
- }
+ public static long setLong(Second s, long value) {
+ s.setInstanceLongField(-1, value);
+ return s.instanceLongField;
+ }
- /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (before)
- /// CHECK: InvokeVirtual
+ /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (before)
+ /// CHECK: InvokeVirtual
- /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (after)
- /// CHECK: InvokeVirtual
+ /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (after)
+ /// CHECK-DAG: <<Second:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Value:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ /// CHECK-DAG: <<NullCk:l\d+>> NullCheck [<<Second>>]
+ /// CHECK-DAG: InstanceFieldSet [<<NullCk>>,<<Value>>]
+ /// CHECK-DAG: <<NullCk2:l\d+>> NullCheck [<<Second>>]
+ /// CHECK-DAG: <<IGet:j\d+>> InstanceFieldGet [<<NullCk2>>]
+ /// CHECK-DAG: <<Conv:j\d+>> TypeConversion [<<Arg2>>]
+ /// CHECK-DAG: <<Add:j\d+>> Add [<<IGet>>,<<Conv>>]
+ /// CHECK-DAG: Return [<<Add>>]
- /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (after)
- /// CHECK-NOT: InstanceFieldSet
- /// CHECK-NOT: StaticFieldSet
+ /// CHECK-START: long Main.setLongReturnArg2(Second, long, int) inliner (after)
+ /// CHECK-NOT: InvokeVirtual
- public static float setStaticFloat(Second s, float value) {
- s.setStaticFloatField(value);
- return s.staticFloatField;
- }
+ public static long setLongReturnArg2(Second s, long value, int arg2) {
+ int result = s.setInstanceLongFieldReturnArg2(value, arg2);
+ return s.instanceLongField + result;
+ }
- /// CHECK-START: java.lang.Object Main.newObject() inliner (before)
- /// CHECK-DAG: <<Obj:l\d+>> NewInstance
- // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
- /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:java.lang.Object.<init>
+ /// CHECK-START: long Main.staticSetLong(Second, long) inliner (before)
+ /// CHECK: InvokeStaticOrDirect
- /// CHECK-START: java.lang.Object Main.newObject() inliner (after)
- /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-START: long Main.staticSetLong(Second, long) inliner (after)
+ /// CHECK: InvokeStaticOrDirect
- public static Object newObject() {
- return new Object();
- }
+ /// CHECK-START: long Main.staticSetLong(Second, long) inliner (after)
+ /// CHECK-NOT: InstanceFieldSet
- public static void main(String[] args) throws Exception {
- Second s = new Second();
-
- // Replaced NOP pattern.
- staticNop();
- nop(s);
- // Replaced "return arg" pattern.
- assertEquals("arbitrary string", staticReturnArg2("arbitrary string"));
- assertEquals(4321L, returnArg1(s, 4321L));
- // Replaced "return const" pattern.
- assertEquals(9, staticReturn9());
- assertEquals(7, return7(s));
- assertEquals(null, staticReturnNull());
- assertEquals(null, returnNull(s));
- // Replaced IGET pattern.
- assertEquals(42, getInt(s));
- assertEquals(-42.0, getDouble(s));
- assertEquals(null, getObject(s));
- assertEquals("dummy", getString(s));
- // Not replaced IGET pattern.
- assertEquals(42, staticGetInt(s));
- assertEquals(-42.0, getDoubleFromParam(s));
- // SGET.
- assertEquals(4242, getStaticInt(s));
- // Replaced IPUT pattern.
- assertEquals(111L, setLong(s, 111L));
- assertEquals(345L, setLongReturnArg2(s, 222L, 123));
- // Not replaced IPUT pattern.
- assertEquals(222L, staticSetLong(s, 222L));
- assertEquals(333L, setLongThroughParam(s, 333L));
- // SPUT.
- assertEquals(-11.5f, setStaticFloat(s, -11.5f));
-
- if (newObject() == null) {
- throw new AssertionError("new Object() cannot be null.");
- }
+ public static long staticSetLong(Second s, long value) {
+ Second.staticSetInstanceLongField(s, value);
+ return s.instanceLongField;
+ }
+
+ /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (before)
+ /// CHECK: InvokeVirtual
+
+ /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (after)
+ /// CHECK: InvokeVirtual
+
+ /// CHECK-START: long Main.setLongThroughParam(Second, long) inliner (after)
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static long setLongThroughParam(Second s, long value) {
+ s.setInstanceLongFieldThroughParam(s, value);
+ return s.instanceLongField;
+ }
+
+ /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (before)
+ /// CHECK: InvokeVirtual
+
+ /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (after)
+ /// CHECK: InvokeVirtual
+
+ /// CHECK-START: float Main.setStaticFloat(Second, float) inliner (after)
+ /// CHECK-NOT: InstanceFieldSet
+ /// CHECK-NOT: StaticFieldSet
+
+ public static float setStaticFloat(Second s, float value) {
+ s.setStaticFloatField(value);
+ return s.staticFloatField;
+ }
+
+ /// CHECK-START: java.lang.Object Main.newObject() inliner (before)
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:java.lang.Object.<init>
+
+ /// CHECK-START: java.lang.Object Main.newObject() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ public static Object newObject() {
+ return new Object();
+ }
+
+ /// CHECK-START: double Main.constructBase() inliner (before)
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBase() {
+ Base b = new Base();
+ return b.intField + b.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructBase(int) inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase(int) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructBase(int) inliner (after)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+
+ /// CHECK-START: double Main.constructBase(int) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBase(int intValue) {
+ Base b = new Base(intValue);
+ return b.intField + b.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructBaseWith0() inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBaseWith0() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBaseWith0() {
+ Base b = new Base(0);
+ return b.intField + b.doubleField;
+ }
+
+ /// CHECK-START: java.lang.String Main.constructBase(java.lang.String) inliner (before)
+ /// CHECK-DAG: <<Value:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: java.lang.String Main.constructBase(java.lang.String) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: java.lang.String Main.constructBase(java.lang.String) inliner (after)
+ /// CHECK-DAG: <<Value:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+
+ /// CHECK-START: java.lang.String Main.constructBase(java.lang.String) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static String constructBase(String stringValue) {
+ Base b = new Base(stringValue);
+ return b.stringField;
+ }
+
+ /// CHECK-START: java.lang.String Main.constructBaseWithNullString() inliner (before)
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Null>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: java.lang.String Main.constructBaseWithNullString() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: java.lang.String Main.constructBaseWithNullString() inliner (after)
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static String constructBaseWithNullString() {
+ String stringValue = null;
+ Base b = new Base(stringValue);
+ return b.stringField;
+ }
+
+ /// CHECK-START: double Main.constructBase(double, java.lang.Object) inliner (before)
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<OValue:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<DValue>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase(double, java.lang.Object) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructBase(double, java.lang.Object) inliner (after)
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<OValue:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<DValue>>]
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<OValue>>]
+
+ /// CHECK-START: double Main.constructBase(double, java.lang.Object) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBase(double doubleValue, Object objectValue) {
+ Base b = new Base(doubleValue, objectValue);
+ return (b.objectField != null) ? b.doubleField : -b.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructBase(int, double, java.lang.Object) inliner (before)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<OValue:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase(int, double, java.lang.Object) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructBase(int, double, java.lang.Object) inliner (after)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<OValue:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<IValue>>]
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<DValue>>]
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<OValue>>]
+
+ /// CHECK-START: double Main.constructBase(int, double, java.lang.Object) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBase(int intValue, double doubleValue, Object objectValue) {
+ Base b = new Base(intValue, doubleValue, objectValue);
+ double tmp = b.intField + b.doubleField;
+ return (b.objectField != null) ? tmp : -tmp;
+ }
+
+ /// CHECK-START: double Main.constructBaseWith0DoubleNull(double) inliner (before)
+ /// CHECK-DAG: <<IValue:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<OValue:l\d+>> NullConstant
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBaseWith0DoubleNull(double) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructBaseWith0DoubleNull(double) inliner (after)
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<DValue>>]
+
+ /// CHECK-START: double Main.constructBaseWith0DoubleNull(double) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBaseWith0DoubleNull(double doubleValue) {
+ Base b = new Base(0, doubleValue, null);
+ double tmp = b.intField + b.doubleField;
+ return (b.objectField != null) ? tmp : -tmp;
+ }
+
+ /// CHECK-START: double Main.constructBase(int, double, java.lang.Object, java.lang.String) inliner (before)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,{{l\d+}},{{l\d+}}{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase(int, double, java.lang.Object, java.lang.String) inliner (after)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,{{l\d+}},{{l\d+}}{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase(int, double, java.lang.Object, java.lang.String) inliner (after)
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBase(
+ int intValue, double doubleValue, Object objectValue, String stringValue) {
+ Base b = new Base(intValue, doubleValue, objectValue, stringValue);
+ double tmp = b.intField + b.doubleField;
+ tmp = (b.objectField != null) ? tmp : -tmp;
+ return (b.stringField != null) ? 2.0 * tmp : 0.5 * tmp;
+ }
+
+ /// CHECK-START: double Main.constructBase(double) inliner (before)
+ /// CHECK-DAG: <<Value:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase(double) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructBase(double) inliner (after)
+ /// CHECK-DAG: <<Value:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+
+ /// CHECK-START: double Main.constructBase(double) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBase(double doubleValue) {
+ Base b = new Base(doubleValue);
+ return b.intField + b.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructBaseWith0d() inliner (before)
+ /// CHECK-DAG: <<Value:d\d+>> DoubleConstant
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBaseWith0d() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBaseWith0d() {
+ Base b = new Base(0.0);
+ return b.intField + b.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructBase(java.lang.Object) inliner (before)
+ /// CHECK-DAG: <<OValue:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase(java.lang.Object) inliner (after)
+ /// CHECK-DAG: <<OValue:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase(java.lang.Object) inliner (after)
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBase(Object objectValue) {
+ Base b = new Base(objectValue);
+ double tmp = b.intField + b.doubleField;
+ return (b.objectField != null) ? tmp + 1.0 : tmp - 1.0;
+ }
+
+ /// CHECK-START: double Main.constructBase(int, long) inliner (before)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<JValue:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<JValue>>{{(,[ij]\d+)?}}] method_name:Base.<init>
+
+ /// CHECK-START: double Main.constructBase(int, long) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructBase(int, long) inliner (after)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<IValue>>]
+
+ /// CHECK-START: double Main.constructBase(int, long) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructBase(int intValue, long dummy) {
+ Base b = new Base(intValue, dummy);
+ return b.intField + b.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerived() inliner (before)
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerived() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerived() {
+ Derived d = new Derived();
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerived(int) inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerived(int) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructDerived(int) inliner (after)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+
+ /// CHECK-START: double Main.constructDerived(int) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerived(int intValue) {
+ Derived d = new Derived(intValue);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerivedWith0() inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerivedWith0() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerivedWith0() {
+ Derived d = new Derived(0);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: java.lang.String Main.constructDerived(java.lang.String) inliner (before)
+ /// CHECK-DAG: <<Value:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: java.lang.String Main.constructDerived(java.lang.String) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: java.lang.String Main.constructDerived(java.lang.String) inliner (after)
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static String constructDerived(String stringValue) {
+ Derived d = new Derived(stringValue);
+ return d.stringField;
+ }
+
+ /// CHECK-START: double Main.constructDerived(double) inliner (before)
+ /// CHECK-DAG: <<Value:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerived(double) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructDerived(double) inliner (after)
+ /// CHECK-DAG: <<Value:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+
+ /// CHECK-START: double Main.constructDerived(double) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerived(double doubleValue) {
+ Derived d = new Derived(doubleValue);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerivedWith0d() inliner (before)
+ /// CHECK-DAG: <<Value:d\d+>> DoubleConstant
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerivedWith0d() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerivedWith0d() {
+ Derived d = new Derived(0.0);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object) inliner (before)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<OValue:l\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,<<OValue>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object) inliner (after)
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<DValue>>]
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerived(int intValue, double doubleValue, Object objectValue) {
+ Derived d = new Derived(intValue, doubleValue, objectValue);
+ double tmp = d.intField + d.doubleField;
+ return (d.objectField != null) ? tmp : -tmp;
+ }
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, java.lang.String) inliner (before)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,{{l\d+}},{{l\d+}}{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, java.lang.String) inliner (after)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,{{l\d+}},{{l\d+}}{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, java.lang.String) inliner (after)
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerived(
+ int intValue, double doubleValue, Object objectValue, String stringValue) {
+ Derived d = new Derived(intValue, doubleValue, objectValue, stringValue);
+ double tmp = d.intField + d.doubleField;
+ tmp = (d.objectField != null) ? tmp : -tmp;
+ return (d.stringField != null) ? 2.0 * tmp : 0.5 * tmp;
+ }
+
+ /// CHECK-START: double Main.constructDerived(float) inliner (before)
+ /// CHECK-DAG: <<Value:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerived(float) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructDerived(float) inliner (after)
+ /// CHECK-DAG: <<Value:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+
+ /// CHECK-START: double Main.constructDerived(float) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerived(float floatValue) {
+ Derived d = new Derived(floatValue);
+ return d.intField + d.doubleField + d.floatField;
+ }
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, float) inliner (before)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<OValue:l\d+>> ParameterValue
+ /// CHECK-DAG: <<FValue:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>,<<OValue>>,<<FValue>>{{(,[ij]\d+)?}}] method_name:Derived.<init>
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, float) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, float) inliner (after)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<FValue:f\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<IValue>>]
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<DValue>>]
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<FValue>>]
+
+ /// CHECK-START: double Main.constructDerived(int, double, java.lang.Object, float) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerived(
+ int intValue, double doubleValue, Object objectValue, float floatValue) {
+ Derived d = new Derived(intValue, doubleValue, objectValue, floatValue);
+ double tmp = d.intField + d.doubleField + d.floatField;
+ return (d.objectField != null) ? tmp : -tmp;
+ }
+
+ /// CHECK-START: int Main.constructBaseWithFinalField() inliner (before)
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:BaseWithFinalField.<init>
+
+ /// CHECK-START: int Main.constructBaseWithFinalField() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static int constructBaseWithFinalField() {
+ BaseWithFinalField b = new BaseWithFinalField();
+ return b.intField;
+ }
+
+ /// CHECK-START: int Main.constructBaseWithFinalField(int) inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:BaseWithFinalField.<init>
+
+ /// CHECK-START: int Main.constructBaseWithFinalField(int) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ /// CHECK-START: int Main.constructBaseWithFinalField(int) inliner (after)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+ /// CHECK-DAG: MemoryBarrier
+
+ /// CHECK-START: int Main.constructBaseWithFinalField(int) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static int constructBaseWithFinalField(int intValue) {
+ BaseWithFinalField b = new BaseWithFinalField(intValue);
+ return b.intField;
+ }
+
+ /// CHECK-START: int Main.constructBaseWithFinalFieldWith0() inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:BaseWithFinalField.<init>
+
+ /// CHECK-START: int Main.constructBaseWithFinalFieldWith0() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static int constructBaseWithFinalFieldWith0() {
+ BaseWithFinalField b = new BaseWithFinalField(0);
+ return b.intField;
+ }
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField() inliner (before)
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerivedWithFinalField() {
+ DerivedWithFinalField d = new DerivedWithFinalField();
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(int) inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(int) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(int) inliner (after)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+ /// CHECK-DAG: MemoryBarrier
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(int) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerivedWithFinalField(int intValue) {
+ DerivedWithFinalField d = new DerivedWithFinalField(intValue);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0() inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+ /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerivedWithFinalFieldWith0() {
+ DerivedWithFinalField d = new DerivedWithFinalField(0);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(double) inliner (before)
+ /// CHECK-DAG: <<Value:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(double) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(double) inliner (after)
+ /// CHECK-DAG: <<Value:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+ /// CHECK-DAG: MemoryBarrier
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(double) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerivedWithFinalField(double doubleValue) {
+ DerivedWithFinalField d = new DerivedWithFinalField(doubleValue);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0d() inliner (before)
+ /// CHECK-DAG: <<Value:d\d+>> DoubleConstant
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+ /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0d() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerivedWithFinalFieldWith0d() {
+ DerivedWithFinalField d = new DerivedWithFinalField(0.0);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (before)
+ /// CHECK-DAG: <<IValue:i\d+>> ParameterValue
+ /// CHECK-DAG: <<DValue:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (after)
+ /// CHECK-DAG: <<Value:d\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ /// CHECK-DAG: InstanceFieldSet [<<Obj>>,<<Value>>]
+ /// CHECK-DAG: MemoryBarrier
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (after)
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-DAG: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ /// CHECK-START: double Main.constructDerivedWithFinalField(int, double) inliner (after)
+ /// CHECK-DAG: MemoryBarrier
+ /// CHECK-NOT: MemoryBarrier
+
+ public static double constructDerivedWithFinalField(int intValue, double doubleValue) {
+ DerivedWithFinalField d = new DerivedWithFinalField(intValue, doubleValue);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0And0d() inliner (before)
+ /// CHECK-DAG: <<IValue:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<DValue:d\d+>> DoubleConstant
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<IValue>>,<<DValue>>{{(,[ij]\d+)?}}] method_name:DerivedWithFinalField.<init>
+
+ /// CHECK-START: double Main.constructDerivedWithFinalFieldWith0And0d() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static double constructDerivedWithFinalFieldWith0And0d() {
+ DerivedWithFinalField d = new DerivedWithFinalField(0, 0.0);
+ return d.intField + d.doubleField;
+ }
+
+ /// CHECK-START: int Main.constructDerivedInSecondDex() inliner (before)
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+ /// CHECK-START: int Main.constructDerivedInSecondDex() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static int constructDerivedInSecondDex() {
+ DerivedInSecondDex d = new DerivedInSecondDex();
+ return d.intField;
+ }
+
+ /// CHECK-START: int Main.constructDerivedInSecondDex(int) inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+ /// CHECK-START: int Main.constructDerivedInSecondDex(int) inliner (after)
+ /// CHECK-DAG: <<Value:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+ /// CHECK-START: int Main.constructDerivedInSecondDex(int) inliner (after)
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static int constructDerivedInSecondDex(int intValue) {
+ DerivedInSecondDex d = new DerivedInSecondDex(intValue);
+ return d.intField;
+ }
+
+ /// CHECK-START: int Main.constructDerivedInSecondDexWith0() inliner (before)
+ /// CHECK-DAG: <<Value:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+ /// CHECK-START: int Main.constructDerivedInSecondDexWith0() inliner (after)
+ /// CHECK-DAG: <<Value:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+ /// CHECK-START: int Main.constructDerivedInSecondDexWith0() inliner (after)
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static int constructDerivedInSecondDexWith0() {
+ DerivedInSecondDex d = new DerivedInSecondDex(0);
+ return d.intField;
+ }
+
+ /// CHECK-START: int Main.constructDerivedInSecondDex(long) inliner (before)
+ /// CHECK-DAG: <<Value:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Obj:l\d+>> NewInstance
+ // Note: The ArtMethod* (typed as int or long) is optional after sharpening.
+ /// CHECK-DAG: InvokeStaticOrDirect [<<Obj>>,<<Value>>{{(,[ij]\d+)?}}] method_name:DerivedInSecondDex.<init>
+
+ /// CHECK-START: int Main.constructDerivedInSecondDex(long) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: MemoryBarrier
+ /// CHECK-NOT: InstanceFieldSet
+
+ public static int constructDerivedInSecondDex(long dummy) {
+ DerivedInSecondDex d = new DerivedInSecondDex(dummy);
+ return d.intField;
+ }
+
+ public static void main(String[] args) throws Exception {
+ Second s = new Second();
+
+ // Replaced NOP pattern.
+ staticNop();
+ nop(s);
+ // Replaced "return arg" pattern.
+ assertEquals("arbitrary string", staticReturnArg2("arbitrary string"));
+ assertEquals(4321L, returnArg1(s, 4321L));
+ // Replaced "return const" pattern.
+ assertEquals(9, staticReturn9());
+ assertEquals(7, return7(s));
+ assertEquals(null, staticReturnNull());
+ assertEquals(null, returnNull(s));
+ // Replaced IGET pattern.
+ assertEquals(42, getInt(s));
+ assertEquals(-42.0, getDouble(s));
+ assertEquals(null, getObject(s));
+ assertEquals("dummy", getString(s));
+ // Not replaced IGET pattern.
+ assertEquals(42, staticGetInt(s));
+ assertEquals(-42.0, getDoubleFromParam(s));
+ // SGET.
+ assertEquals(4242, getStaticInt(s));
+ // Replaced IPUT pattern.
+ assertEquals(111L, setLong(s, 111L));
+ assertEquals(345L, setLongReturnArg2(s, 222L, 123));
+ // Not replaced IPUT pattern.
+ assertEquals(222L, staticSetLong(s, 222L));
+ assertEquals(333L, setLongThroughParam(s, 333L));
+ // SPUT.
+ assertEquals(-11.5f, setStaticFloat(s, -11.5f));
+
+ if (newObject() == null) {
+ throw new AssertionError("new Object() cannot be null.");
}
- private static void assertEquals(int expected, int actual) {
- if (expected != actual) {
- throw new AssertionError("Wrong result: " + expected + " != " + actual);
- }
+ assertEquals(0.0, constructBase());
+ assertEquals(42.0, constructBase(42));
+ assertEquals(0.0, constructBaseWith0());
+ assertEquals("something", constructBase("something"));
+ assertEquals(null, constructBaseWithNullString());
+ assertEquals(11.0, constructBase(11.0, new Object()));
+ assertEquals(-12.0, constructBase(12.0, null));
+ assertEquals(30.0, constructBase(17, 13.0, new Object()));
+ assertEquals(-34.0, constructBase(19, 15.0, null));
+ assertEquals(-22.5, constructBaseWith0DoubleNull(22.5));
+ assertEquals(-8.0, constructBase(2, 14.0, null, null));
+ assertEquals(-64.0, constructBase(4, 28.0, null, "dummy"));
+ assertEquals(13.0, constructBase(24, 2.0, new Object(), null));
+ assertEquals(30.0, constructBase(11, 4.0, new Object(), "dummy"));
+ assertEquals(43.0, constructBase(43.0));
+ assertEquals(0.0, constructBaseWith0d());
+ assertEquals(1.0, constructBase(new Object()));
+ assertEquals(-1.0, constructBase((Object) null));
+ assertEquals(123.0, constructBase(123, 65L));
+
+ assertEquals(0.0, constructDerived());
+ assertEquals(73.0, constructDerived(73));
+ assertEquals(0.0, constructDerivedWith0());
+ assertEquals(null, constructDerived("something else"));
+ assertEquals(18.0, constructDerived(18.0));
+ assertEquals(0.0, constructDerivedWith0d());
+ assertEquals(-7.0, constructDerived(5, 7.0, new Object()));
+ assertEquals(-4.0, constructDerived(9, 4.0, null));
+ assertEquals(0.0, constructDerived(1, 9.0, null, null));
+ assertEquals(0.0, constructDerived(2, 8.0, null, "dummy"));
+ assertEquals(0.0, constructDerived(3, 7.0, new Object(), null));
+ assertEquals(0.0, constructDerived(4, 6.0, new Object(), "dummy"));
+ assertEquals(17.0, constructDerived(17.0f));
+ assertEquals(-5.5, constructDerived(6, -7.0, new Object(), 6.5f));
+
+ assertEquals(0, constructBaseWithFinalField());
+ assertEquals(77, constructBaseWithFinalField(77));
+ assertEquals(0, constructBaseWithFinalFieldWith0());
+ assertEquals(0.0, constructDerivedWithFinalField());
+ assertEquals(-33.0, constructDerivedWithFinalField(-33));
+ assertEquals(0.0, constructDerivedWithFinalFieldWith0());
+ assertEquals(-44.0, constructDerivedWithFinalField(-44.0));
+ assertEquals(0.0, constructDerivedWithFinalFieldWith0d());
+ assertEquals(88, constructDerivedWithFinalField(22, 66.0));
+ assertEquals(0.0, constructDerivedWithFinalFieldWith0And0d());
+
+ assertEquals(0, constructDerivedInSecondDex());
+ assertEquals(123, constructDerivedInSecondDex(123));
+ assertEquals(0, constructDerivedInSecondDexWith0());
+ assertEquals(0, constructDerivedInSecondDex(7L));
+ }
+
+ private static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new AssertionError("Wrong result: " + expected + " != " + actual);
}
+ }
- private static void assertEquals(double expected, double actual) {
- if (expected != actual) {
- throw new AssertionError("Wrong result: " + expected + " != " + actual);
- }
+ private static void assertEquals(double expected, double actual) {
+ if (expected != actual) {
+ throw new AssertionError("Wrong result: " + expected + " != " + actual);
}
+ }
- private static void assertEquals(Object expected, Object actual) {
- if (expected != actual && (expected == null || !expected.equals(actual))) {
- throw new AssertionError("Wrong result: " + expected + " != " + actual);
- }
+ private static void assertEquals(Object expected, Object actual) {
+ if (expected != actual && (expected == null || !expected.equals(actual))) {
+ throw new AssertionError("Wrong result: " + expected + " != " + actual);
}
+ }
}
diff --git a/test/570-checker-osr/expected.txt b/test/570-checker-osr/expected.txt
index 555c6a91d8..25fb2200eb 100644
--- a/test/570-checker-osr/expected.txt
+++ b/test/570-checker-osr/expected.txt
@@ -1,5 +1,5 @@
JNI_OnLoad called
-100000000
-200000000
-300000000
-400000000
+100000
+200000
+300000
+400000
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index fb846872e6..4c58b39319 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -17,6 +17,7 @@
#include "art_method.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "jit/profiling_info.h"
#include "oat_quick_method_header.h"
#include "scoped_thread_state_change.h"
#include "stack_map.h"
@@ -28,7 +29,8 @@ class OsrVisitor : public StackVisitor {
explicit OsrVisitor(Thread* thread)
SHARED_REQUIRES(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- in_osr_method_(false) {}
+ in_osr_method_(false),
+ in_interpreter_(false) {}
bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
@@ -38,11 +40,14 @@ class OsrVisitor : public StackVisitor {
(m_name.compare("$noinline$returnFloat") == 0) ||
(m_name.compare("$noinline$returnDouble") == 0) ||
(m_name.compare("$noinline$returnLong") == 0) ||
- (m_name.compare("$noinline$deopt") == 0)) {
+ (m_name.compare("$noinline$deopt") == 0) ||
+ (m_name.compare("$noinline$inlineCache") == 0)) {
const OatQuickMethodHeader* header =
Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
if (header != nullptr && header == GetCurrentOatQuickMethodHeader()) {
in_osr_method_ = true;
+ } else if (IsCurrentFrameInInterpreter()) {
+ in_interpreter_ = true;
}
return false;
}
@@ -50,6 +55,7 @@ class OsrVisitor : public StackVisitor {
}
bool in_osr_method_;
+ bool in_interpreter_;
};
extern "C" JNIEXPORT jboolean JNICALL Java_Main_ensureInOsrCode(JNIEnv*, jclass) {
@@ -64,4 +70,75 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_ensureInOsrCode(JNIEnv*, jclass)
return visitor.in_osr_method_;
}
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_ensureInInterpreter(JNIEnv*, jclass) {
+ if (!Runtime::Current()->UseJit()) {
+ // The return value is irrelevant if we're not using JIT.
+ return false;
+ }
+ ScopedObjectAccess soa(Thread::Current());
+ OsrVisitor visitor(soa.Self());
+ visitor.WalkStack();
+ return visitor.in_interpreter_;
+}
+
+class ProfilingInfoVisitor : public StackVisitor {
+ public:
+ explicit ProfilingInfoVisitor(Thread* thread)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
+
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ std::string m_name(m->GetName());
+
+ if (m_name.compare("$noinline$inlineCache") == 0) {
+ ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
+ return false;
+ }
+ return true;
+ }
+};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasProfilingInfo(JNIEnv*, jclass) {
+ if (!Runtime::Current()->UseJit()) {
+ return;
+ }
+ ScopedObjectAccess soa(Thread::Current());
+ ProfilingInfoVisitor visitor(soa.Self());
+ visitor.WalkStack();
+}
+
+class OsrCheckVisitor : public StackVisitor {
+ public:
+ explicit OsrCheckVisitor(Thread* thread)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
+
+ bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ std::string m_name(m->GetName());
+
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (m_name.compare("$noinline$inlineCache") == 0 && jit != nullptr) {
+ while (jit->GetCodeCache()->LookupOsrMethodHeader(m) == nullptr) {
+ // Sleep to yield to the compiler thread.
+ sleep(0);
+ // Will either ensure it's compiled or do the compilation itself.
+ jit->CompileMethod(m, Thread::Current(), /* osr */ true);
+ }
+ return false;
+ }
+ return true;
+ }
+};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv*, jclass) {
+ if (!Runtime::Current()->UseJit()) {
+ return;
+ }
+ ScopedObjectAccess soa(Thread::Current());
+ OsrCheckVisitor visitor(soa.Self());
+ visitor.WalkStack();
+}
+
} // namespace art
diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java
index 7485163314..828908a582 100644
--- a/test/570-checker-osr/src/Main.java
+++ b/test/570-checker-osr/src/Main.java
@@ -16,6 +16,7 @@
public class Main {
public static void main(String[] args) {
+ new SubMain();
System.loadLibrary(args[0]);
if ($noinline$returnInt() != 53) {
throw new Error("Unexpected return value");
@@ -33,12 +34,18 @@ public class Main {
try {
$noinline$deopt();
} catch (Exception e) {}
+ DeoptimizationController.stopDeoptimization();
+
+ $noinline$inlineCache(new Main(), /* isSecondInvocation */ false);
+ if ($noinline$inlineCache(new SubMain(), /* isSecondInvocation */ true) != SubMain.class) {
+ throw new Error("Unexpected return value");
+ }
}
public static int $noinline$returnInt() {
if (doThrow) throw new Error("");
int i = 0;
- for (; i < 100000000; ++i) {
+ for (; i < 100000; ++i) {
}
while (!ensureInOsrCode()) {}
System.out.println(i);
@@ -48,7 +55,7 @@ public class Main {
public static float $noinline$returnFloat() {
if (doThrow) throw new Error("");
int i = 0;
- for (; i < 200000000; ++i) {
+ for (; i < 200000; ++i) {
}
while (!ensureInOsrCode()) {}
System.out.println(i);
@@ -58,7 +65,7 @@ public class Main {
public static double $noinline$returnDouble() {
if (doThrow) throw new Error("");
int i = 0;
- for (; i < 300000000; ++i) {
+ for (; i < 300000; ++i) {
}
while (!ensureInOsrCode()) {}
System.out.println(i);
@@ -67,8 +74,8 @@ public class Main {
public static long $noinline$returnLong() {
if (doThrow) throw new Error("");
- int i = 1000000;
- for (; i < 400000000; ++i) {
+ int i = 0;
+ for (; i < 400000; ++i) {
}
while (!ensureInOsrCode()) {}
System.out.println(i);
@@ -78,15 +85,66 @@ public class Main {
public static void $noinline$deopt() {
if (doThrow) throw new Error("");
int i = 0;
- for (; i < 100000000; ++i) {
+ for (; i < 100000; ++i) {
}
while (!ensureInOsrCode()) {}
DeoptimizationController.startDeoptimization();
}
+ public static Class $noinline$inlineCache(Main m, boolean isSecondInvocation) {
+ // If we are running in non-JIT mode, or were unlucky enough to get this method
+ // already JITted, just return the expected value.
+ if (!ensureInInterpreter()) {
+ return SubMain.class;
+ }
+
+ ensureHasProfilingInfo();
+
+ // Ensure that we have OSR code to jump to.
+ if (isSecondInvocation) {
+ ensureHasOsrCode();
+ }
+
+ // This call will be optimized in the OSR compiled code
+ // to check and deoptimize if m is not of type 'Main'.
+ Main other = m.inlineCache();
+
+ // Jump to OSR compiled code. The second run
+ // of this method will have 'm' as a SubMain, and the compiled
+ // code we are jumping to will have wrongly optimized 'other' as being a
+ // 'Main'.
+ if (isSecondInvocation) {
+ while (!ensureInOsrCode()) {}
+ }
+
+ // We used to wrongly optimize this call and assume 'other' was a 'Main'.
+ return other.returnClass();
+ }
+
+ public Main inlineCache() {
+ return new Main();
+ }
+
+ public Class returnClass() {
+ return Main.class;
+ }
+
public static int[] array = new int[4];
+ public static native boolean ensureInInterpreter();
public static native boolean ensureInOsrCode();
+ public static native void ensureHasProfilingInfo();
+ public static native void ensureHasOsrCode();
public static boolean doThrow = false;
}
+
+class SubMain extends Main {
+ public Class returnClass() {
+ return SubMain.class;
+ }
+
+ public Main inlineCache() {
+ return new SubMain();
+ }
+}
diff --git a/test/570-checker-select/src/Main.java b/test/570-checker-select/src/Main.java
index ec60240e90..8a4cf603af 100644
--- a/test/570-checker-select/src/Main.java
+++ b/test/570-checker-select/src/Main.java
@@ -19,6 +19,11 @@ public class Main {
/// CHECK-START: int Main.BoolCond_IntVarVar(boolean, int, int) register (after)
/// CHECK: Select [{{i\d+}},{{i\d+}},{{z\d+}}]
+ /// CHECK-START-ARM64: int Main.BoolCond_IntVarVar(boolean, int, int) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: csel ne
+
/// CHECK-START-X86_64: int Main.BoolCond_IntVarVar(boolean, int, int) disassembly (after)
/// CHECK: <<Cond:z\d+>> ParameterValue
/// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
@@ -31,6 +36,11 @@ public class Main {
/// CHECK-START: int Main.BoolCond_IntVarCst(boolean, int) register (after)
/// CHECK: Select [{{i\d+}},{{i\d+}},{{z\d+}}]
+ /// CHECK-START-ARM64: int Main.BoolCond_IntVarCst(boolean, int) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: csinc ne
+
/// CHECK-START-X86_64: int Main.BoolCond_IntVarCst(boolean, int) disassembly (after)
/// CHECK: <<Cond:z\d+>> ParameterValue
/// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
@@ -43,6 +53,11 @@ public class Main {
/// CHECK-START: int Main.BoolCond_IntCstVar(boolean, int) register (after)
/// CHECK: Select [{{i\d+}},{{i\d+}},{{z\d+}}]
+ /// CHECK-START-ARM64: int Main.BoolCond_IntCstVar(boolean, int) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: csinc eq
+
/// CHECK-START-X86_64: int Main.BoolCond_IntCstVar(boolean, int) disassembly (after)
/// CHECK: <<Cond:z\d+>> ParameterValue
/// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
@@ -55,6 +70,11 @@ public class Main {
/// CHECK-START: long Main.BoolCond_LongVarVar(boolean, long, long) register (after)
/// CHECK: Select [{{j\d+}},{{j\d+}},{{z\d+}}]
+ /// CHECK-START-ARM64: long Main.BoolCond_LongVarVar(boolean, long, long) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: csel ne
+
/// CHECK-START-X86_64: long Main.BoolCond_LongVarVar(boolean, long, long) disassembly (after)
/// CHECK: <<Cond:z\d+>> ParameterValue
/// CHECK: Select [{{j\d+}},{{j\d+}},<<Cond>>]
@@ -67,6 +87,11 @@ public class Main {
/// CHECK-START: long Main.BoolCond_LongVarCst(boolean, long) register (after)
/// CHECK: Select [{{j\d+}},{{j\d+}},{{z\d+}}]
+ /// CHECK-START-ARM64: long Main.BoolCond_LongVarCst(boolean, long) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: csinc ne
+
/// CHECK-START-X86_64: long Main.BoolCond_LongVarCst(boolean, long) disassembly (after)
/// CHECK: <<Cond:z\d+>> ParameterValue
/// CHECK: Select [{{j\d+}},{{j\d+}},<<Cond>>]
@@ -79,6 +104,11 @@ public class Main {
/// CHECK-START: long Main.BoolCond_LongCstVar(boolean, long) register (after)
/// CHECK: Select [{{j\d+}},{{j\d+}},{{z\d+}}]
+ /// CHECK-START-ARM64: long Main.BoolCond_LongCstVar(boolean, long) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: csinc eq
+
/// CHECK-START-X86_64: long Main.BoolCond_LongCstVar(boolean, long) disassembly (after)
/// CHECK: <<Cond:z\d+>> ParameterValue
/// CHECK: Select [{{j\d+}},{{j\d+}},<<Cond>>]
@@ -91,6 +121,11 @@ public class Main {
/// CHECK-START: float Main.BoolCond_FloatVarVar(boolean, float, float) register (after)
/// CHECK: Select [{{f\d+}},{{f\d+}},{{z\d+}}]
+ /// CHECK-START-ARM64: float Main.BoolCond_FloatVarVar(boolean, float, float) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: fcsel ne
+
public static float BoolCond_FloatVarVar(boolean cond, float x, float y) {
return cond ? x : y;
}
@@ -98,6 +133,11 @@ public class Main {
/// CHECK-START: float Main.BoolCond_FloatVarCst(boolean, float) register (after)
/// CHECK: Select [{{f\d+}},{{f\d+}},{{z\d+}}]
+ /// CHECK-START-ARM64: float Main.BoolCond_FloatVarCst(boolean, float) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: fcsel ne
+
public static float BoolCond_FloatVarCst(boolean cond, float x) {
return cond ? x : 1.0f;
}
@@ -105,6 +145,11 @@ public class Main {
/// CHECK-START: float Main.BoolCond_FloatCstVar(boolean, float) register (after)
/// CHECK: Select [{{f\d+}},{{f\d+}},{{z\d+}}]
+ /// CHECK-START-ARM64: float Main.BoolCond_FloatCstVar(boolean, float) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: fcsel ne
+
public static float BoolCond_FloatCstVar(boolean cond, float y) {
return cond ? 1.0f : y;
}
@@ -113,6 +158,11 @@ public class Main {
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
/// CHECK-NEXT: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+ /// CHECK-START-ARM64: int Main.IntNonmatCond_IntVarVar(int, int, int, int) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: csel le
+
/// CHECK-START-X86_64: int Main.IntNonmatCond_IntVarVar(int, int, int, int) disassembly (after)
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
/// CHECK-NEXT: Select [{{i\d+}},{{i\d+}},<<Cond>>]
@@ -127,6 +177,13 @@ public class Main {
/// CHECK-NEXT: <<Sel:i\d+>> Select [{{i\d+}},{{i\d+}},{{z\d+}}]
/// CHECK-NEXT: Add [<<Cond>>,<<Sel>>]
+ /// CHECK-START-ARM64: int Main.IntMatCond_IntVarVar(int, int, int, int) disassembly (after)
+ /// CHECK: LessThanOrEqual
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: cset le
+ /// CHECK: Select
+ /// CHECK-NEXT: csel le
+
/// CHECK-START-X86_64: int Main.IntMatCond_IntVarVar(int, int, int, int) disassembly (after)
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
/// CHECK: Select [{{i\d+}},{{i\d+}},<<Cond>>]
@@ -141,6 +198,11 @@ public class Main {
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
/// CHECK-NEXT: Select [{{j\d+}},{{j\d+}},<<Cond>>]
+ /// CHECK-START-ARM64: long Main.IntNonmatCond_LongVarVar(int, int, long, long) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: csel le
+
/// CHECK-START-X86_64: long Main.IntNonmatCond_LongVarVar(int, int, long, long) disassembly (after)
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
/// CHECK-NEXT: Select [{{j\d+}},{{j\d+}},<<Cond>>]
@@ -156,6 +218,13 @@ public class Main {
/// CHECK: <<Sel2:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
/// CHECK: Add [<<Sel2>>,<<Sel1>>]
+ /// CHECK-START-ARM64: long Main.IntMatCond_LongVarVar(int, int, long, long) disassembly (after)
+ /// CHECK: LessThanOrEqual
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: cset le
+ /// CHECK: Select
+ /// CHECK-NEXT: csel le
+
/// CHECK-START-X86_64: long Main.IntMatCond_LongVarVar(int, int, long, long) disassembly (after)
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
/// CHECK: Select [{{j\d+}},{{j\d+}},<<Cond>>]
@@ -172,6 +241,11 @@ public class Main {
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
/// CHECK: Select [{{j\d+}},{{j\d+}},<<Cond>>]
+ /// CHECK-START-ARM64: long Main.LongNonmatCond_LongVarVar(long, long, long, long) disassembly (after)
+ /// CHECK: Select
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: csel le
+
/// CHECK-START-X86_64: long Main.LongNonmatCond_LongVarVar(long, long, long, long) disassembly (after)
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
/// CHECK: Select [{{j\d+}},{{j\d+}},<<Cond>>]
@@ -187,6 +261,13 @@ public class Main {
/// CHECK: <<Sel2:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
/// CHECK: Add [<<Sel2>>,<<Sel1>>]
+ /// CHECK-START-ARM64: long Main.LongMatCond_LongVarVar(long, long, long, long) disassembly (after)
+ /// CHECK: LessThanOrEqual
+ /// CHECK-NEXT: cmp
+ /// CHECK-NEXT: cset le
+ /// CHECK: Select
+ /// CHECK-NEXT: csel le
+
/// CHECK-START-X86_64: long Main.LongMatCond_LongVarVar(long, long, long, long) disassembly (after)
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
/// CHECK: Select [{{j\d+}},{{j\d+}},<<Cond>>]
@@ -203,6 +284,12 @@ public class Main {
/// CHECK: <<Cond:z\d+>> LessThanOrEqual [{{f\d+}},{{f\d+}}]
/// CHECK-NEXT: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+ /// CHECK-START-ARM64: int Main.FloatLtNonmatCond_IntVarVar(float, float, int, int) disassembly (after)
+ /// CHECK: LessThanOrEqual
+ /// CHECK: Select
+ /// CHECK-NEXT: fcmp
+ /// CHECK-NEXT: csel le
+
public static int FloatLtNonmatCond_IntVarVar(float a, float b, int x, int y) {
return a > b ? x : y;
}
@@ -211,6 +298,12 @@ public class Main {
/// CHECK: <<Cond:z\d+>> GreaterThanOrEqual [{{f\d+}},{{f\d+}}]
/// CHECK-NEXT: Select [{{i\d+}},{{i\d+}},<<Cond>>]
+ /// CHECK-START-ARM64: int Main.FloatGtNonmatCond_IntVarVar(float, float, int, int) disassembly (after)
+ /// CHECK: GreaterThanOrEqual
+ /// CHECK: Select
+ /// CHECK-NEXT: fcmp
+ /// CHECK-NEXT: csel hs
+
public static int FloatGtNonmatCond_IntVarVar(float a, float b, int x, int y) {
return a < b ? x : y;
}
@@ -219,6 +312,12 @@ public class Main {
/// CHECK: <<Cond:z\d+>> GreaterThanOrEqual [{{f\d+}},{{f\d+}}]
/// CHECK-NEXT: Select [{{f\d+}},{{f\d+}},<<Cond>>]
+ /// CHECK-START-ARM64: float Main.FloatGtNonmatCond_FloatVarVar(float, float, float, float) disassembly (after)
+ /// CHECK: GreaterThanOrEqual
+ /// CHECK: Select
+ /// CHECK-NEXT: fcmp
+ /// CHECK-NEXT: fcsel hs
+
public static float FloatGtNonmatCond_FloatVarVar(float a, float b, float x, float y) {
return a < b ? x : y;
}
@@ -228,6 +327,13 @@ public class Main {
/// CHECK-NEXT: <<Sel:i\d+>> Select [{{i\d+}},{{i\d+}},<<Cond>>]
/// CHECK-NEXT: Add [<<Cond>>,<<Sel>>]
+ /// CHECK-START-ARM64: int Main.FloatLtMatCond_IntVarVar(float, float, int, int) disassembly (after)
+ /// CHECK: LessThanOrEqual
+ /// CHECK-NEXT: fcmp
+ /// CHECK-NEXT: cset le
+ /// CHECK: Select
+ /// CHECK-NEXT: csel le
+
public static int FloatLtMatCond_IntVarVar(float a, float b, int x, int y) {
int result = (a > b ? x : y);
return result + (a > b ? 0 : 1);
@@ -238,6 +344,13 @@ public class Main {
/// CHECK-NEXT: <<Sel:i\d+>> Select [{{i\d+}},{{i\d+}},<<Cond>>]
/// CHECK-NEXT: Add [<<Cond>>,<<Sel>>]
+ /// CHECK-START-ARM64: int Main.FloatGtMatCond_IntVarVar(float, float, int, int) disassembly (after)
+ /// CHECK: GreaterThanOrEqual
+ /// CHECK-NEXT: fcmp
+ /// CHECK-NEXT: cset hs
+ /// CHECK: Select
+ /// CHECK-NEXT: csel hs
+
public static int FloatGtMatCond_IntVarVar(float a, float b, int x, int y) {
int result = (a < b ? x : y);
return result + (a < b ? 0 : 1);
@@ -248,6 +361,13 @@ public class Main {
/// CHECK-NEXT: <<Sel:f\d+>> Select [{{f\d+}},{{f\d+}},<<Cond>>]
/// CHECK-NEXT: TypeConversion [<<Cond>>]
+ /// CHECK-START-ARM64: float Main.FloatGtMatCond_FloatVarVar(float, float, float, float) disassembly (after)
+ /// CHECK: GreaterThanOrEqual
+ /// CHECK-NEXT: fcmp
+ /// CHECK-NEXT: cset hs
+ /// CHECK: Select
+ /// CHECK-NEXT: fcsel hs
+
public static float FloatGtMatCond_FloatVarVar(float a, float b, float x, float y) {
float result = (a < b ? x : y);
return result + (a < b ? 0 : 1);
diff --git a/test/572-checker-array-get-regression/expected.txt b/test/572-checker-array-get-regression/expected.txt
new file mode 100644
index 0000000000..f7d1ad4743
--- /dev/null
+++ b/test/572-checker-array-get-regression/expected.txt
@@ -0,0 +1 @@
+524287
diff --git a/test/572-checker-array-get-regression/info.txt b/test/572-checker-array-get-regression/info.txt
new file mode 100644
index 0000000000..d06feee152
--- /dev/null
+++ b/test/572-checker-array-get-regression/info.txt
@@ -0,0 +1,3 @@
+Regression test for the ARM64 Baker's read barrier fast path compiler
+instrumentation of array loads with a large constant index, where we
+used to require too many scratch (temporary) registers.
diff --git a/test/572-checker-array-get-regression/src/Main.java b/test/572-checker-array-get-regression/src/Main.java
new file mode 100644
index 0000000000..b55be706f4
--- /dev/null
+++ b/test/572-checker-array-get-regression/src/Main.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String[] args) {
+ System.out.println(test().intValue());
+ }
+
+ /// CHECK-START: java.lang.Integer Main.test() builder (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-DAG: <<Const2P19:i\d+>> IntConstant 524288
+ /// CHECK-DAG: <<ConstM1:i\d+>> IntConstant -1
+ /// CHECK-DAG: <<Array:l\d+>> NewArray [<<Const2P19>>,<<Method>>]
+ /// CHECK-DAG: <<NullCheck1:l\d+>> NullCheck [<<Array>>]
+ /// CHECK-DAG: <<Length1:i\d+>> ArrayLength [<<NullCheck1>>]
+ /// CHECK-DAG: <<Index:i\d+>> Add [<<Length1>>,<<ConstM1>>]
+ /// CHECK-DAG: <<NullCheck2:l\d+>> NullCheck [<<Array>>]
+ /// CHECK-DAG: <<Length2:i\d+>> ArrayLength [<<NullCheck2>>]
+ /// CHECK-DAG: <<BoundsCheck:i\d+>> BoundsCheck [<<Index>>,<<Length2>>]
+ /// CHECK-DAG: <<LastElement:l\d+>> ArrayGet [<<NullCheck2>>,<<BoundsCheck>>]
+ /// CHECK-DAG: Return [<<LastElement>>]
+
+
+ /// CHECK-START: java.lang.Integer Main.test() register (before)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK-DAG: <<Const2P19:i\d+>> IntConstant 524288
+ /// CHECK-DAG: <<Const2P19M1:i\d+>> IntConstant 524287
+ /// CHECK-DAG: <<Array:l\d+>> NewArray [<<Const2P19>>,<<Method>>]
+ /// CHECK-DAG: <<LastElement:l\d+>> ArrayGet [<<Array>>,<<Const2P19M1>>]
+ /// CHECK-DAG: Return [<<LastElement>>]
+
+ public static Integer test() {
+ Integer[] integers = new Integer[1 << 19];
+ initIntegerArray(integers);
+ // Array load with a large constant index (after constant folding
+ // and bounds check elimination).
+ Integer last_integer = integers[integers.length - 1];
+ return last_integer;
+ }
+
+ public static void initIntegerArray(Integer[] integers) {
+ for (int i = 0; i < integers.length; ++i) {
+ integers[i] = new Integer(i);
+ }
+ }
+
+}
diff --git a/test/573-checker-checkcast-regression/expected.txt b/test/573-checker-checkcast-regression/expected.txt
new file mode 100644
index 0000000000..b8626c4cff
--- /dev/null
+++ b/test/573-checker-checkcast-regression/expected.txt
@@ -0,0 +1 @@
+4
diff --git a/test/573-checker-checkcast-regression/info.txt b/test/573-checker-checkcast-regression/info.txt
new file mode 100644
index 0000000000..74a6d6ec00
--- /dev/null
+++ b/test/573-checker-checkcast-regression/info.txt
@@ -0,0 +1,4 @@
+Regression test for the x86-64 Baker's read barrier fast path compiler
+instrumentation of CheckCasts, where we used to use an
+art::x86_64::NearLabel, the range of which was sometimes too short
+with Baker's read barriers enabled.
diff --git a/test/573-checker-checkcast-regression/src/Main.java b/test/573-checker-checkcast-regression/src/Main.java
new file mode 100644
index 0000000000..473a2b164e
--- /dev/null
+++ b/test/573-checker-checkcast-regression/src/Main.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void main(String[] args) {
+ Object[] array = { new Integer(1), new Integer(2), new Integer(3) };
+ int result = test(array, 0, 2);
+ System.out.println(result);
+ }
+
+ // This test method uses two integers (`index1` and `index2`) to
+ // force the register allocator to use some high registers (R8-R15)
+ // on x86-64 in the code generated for the first CheckCast (which
+ // converts `new_array` to an `Object[]`), so as to produce code
+ // containing a conditional jump whose offset does not fit in a
+ // NearLabel when using Baker's read barrier fast path (because
+ // x86-64 instructions using these high registers have a larger
+ // encoding).
+ //
+ // The intent of this artificial constraint is to ensure the initial
+ // failure is properly tested by this regression test.
+
+ /// CHECK-START: int Main.test(java.lang.Object, int, int) register (after)
+ /// CHECK-DAG: CheckCast check_kind:array_object_check
+ /// CHECK-DAG: CheckCast check_kind:exact_check
+ /// CHECK-DAG: CheckCast check_kind:exact_check
+
+ static public int test(Object new_array, int index1, int index2) {
+ Object[] objectArray = (Object[]) new_array;
+ Integer integer1 = (Integer) objectArray[index1];
+ Integer integer2 = (Integer) objectArray[index2];
+ return integer1.intValue() + integer2.intValue();
+ }
+
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 73ce3073ce..8808a50f75 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -58,4 +58,6 @@ b/26594149 (5)
b/26594149 (6)
b/26594149 (7)
b/26594149 (8)
+b/27148248
+b/26965384
Done!
diff --git a/test/800-smali/smali/b_26965384.smali b/test/800-smali/smali/b_26965384.smali
new file mode 100644
index 0000000000..47ed4188bb
--- /dev/null
+++ b/test/800-smali/smali/b_26965384.smali
@@ -0,0 +1,20 @@
+.class public LB26965384;
+.super LB26965384Super;
+
+.method public constructor <init>()V
+ .locals 1
+ const v0, 0
+ iput v0, p0, LB26965384;->a:I
+ invoke-direct {p0}, LB26965384Super;-><init>()V
+ return-void
+.end method
+
+
+# Just loading this class should fail. It doesn't really matter what's in
+# this method.
+.method public static run()V
+ .registers 4
+ new-instance v0, LB26965384;
+ invoke-direct {v0}, LB26965384;-><init>()V
+ return-void
+.end method
diff --git a/test/800-smali/smali/b_26965384Super.smali b/test/800-smali/smali/b_26965384Super.smali
new file mode 100644
index 0000000000..32faea790e
--- /dev/null
+++ b/test/800-smali/smali/b_26965384Super.smali
@@ -0,0 +1,10 @@
+.class public LB26965384Super;
+.super Ljava/lang/Object;
+
+.field public a:I
+
+.method public constructor <init>()V
+ .locals 0
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
diff --git a/test/800-smali/smali/b_27148248.smali b/test/800-smali/smali/b_27148248.smali
new file mode 100644
index 0000000000..4601cc6931
--- /dev/null
+++ b/test/800-smali/smali/b_27148248.smali
@@ -0,0 +1,27 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LB27148248;
+
+# Regression test for a dex2oatd crash during compilation of a method
+# that throws with an argument of non-reference type.
+
+.super Ljava/lang/Object;
+
+.method public static run()V
+ .registers 1
+ const v0, 0xbad
+ throw v0
+.end method
+
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index b0eff5d0bd..4e6de46caa 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -160,6 +160,10 @@ public class Main {
null));
testCases.add(new TestCase("b/26594149 (8)", "B26594149_8", "run", null, new VerifyError(),
null));
+ testCases.add(new TestCase("b/27148248", "B27148248", "run", null, new VerifyError(),
+ null));
+ testCases.add(new TestCase("b/26965384", "B26965384", "run", null, new VerifyError(),
+ null));
}
public void runTests() {
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index f5ee87e8ae..b3560b634b 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -235,6 +235,18 @@ ifdef dist_goal
$(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
endif
+# 569-checker-pattern-replacement tests behaviour that is present only on the host.
+TEST_ART_BROKEN_TARGET_TESTS := \
+ 569-checker-pattern-replacement
+
+ifneq (,$(filter target,$(TARGET_TYPES)))
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_TARGET_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_TARGET_TESTS :=
+
# Tests that require python3.
TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
960-default-smali \
@@ -434,9 +446,7 @@ TEST_ART_BROKEN_INTERPRETER_RUN_TESTS :=
# Known broken tests for the JIT.
# CFI unwinding expects managed frames, and the test does not iterate enough to even compile. JIT
# also uses Generic JNI instead of the JNI compiler.
-# 570 is disabled while investigating osr flakiness.
TEST_ART_BROKEN_JIT_RUN_TESTS := \
- 570-checker-osr \
137-cfi
ifneq (,$(filter jit,$(COMPILER_TYPES)))
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 8245ccfde5..2db1e6c947 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -406,6 +406,9 @@ if [ "$HOST" = "n" ]; then
TMP_DIR_OPTION="-Djava.io.tmpdir=/data/local/tmp"
fi
+# We set DumpNativeStackOnSigQuit to false to avoid stressing libunwind.
+# b/27185632
+# b/24664297
dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
$GDB_ARGS \
$FLAGS \
@@ -420,6 +423,7 @@ dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
$DEBUGGER_OPTS \
$DALVIKVM_BOOT_OPT \
$TMP_DIR_OPTION \
+ -XX:DumpNativeStackOnSigQuit:false \
-cp $DEX_LOCATION/$TEST_NAME.jar$SECONDARY_DEX $MAIN $ARGS"
# Remove whitespace.
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 9e02ce2f90..2eb52bcad9 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -21,7 +21,7 @@ fi
out_dir=${OUT_DIR-out}
java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
-common_targets="vogar ${java_libraries_dir}/core-tests_intermediates/javalib.jar apache-harmony-jdwp-tests-hostdex ${java_libraries_dir}/jsr166-tests_intermediates/javalib.jar ${out_dir}/host/linux-x86/bin/jack"
+common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests ${out_dir}/host/linux-x86/bin/jack"
mode="target"
j_arg="-j$(nproc)"
showcommands=
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index f346239763..45fb4b4dec 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -20,13 +20,13 @@ if [ ! -d libcore ]; then
fi
# Jar containing jsr166 tests.
-jsr166_test_jar=${OUT_DIR-out}/target/common/obj/JAVA_LIBRARIES/jsr166-tests_intermediates/javalib.jar
+jsr166_test_jack=${OUT_DIR-out}/target/common/obj/JAVA_LIBRARIES/jsr166-tests_intermediates/classes.jack
# Jar containing all the other tests.
-test_jar=${OUT_DIR-out}/target/common/obj/JAVA_LIBRARIES/core-tests_intermediates/javalib.jar
+test_jack=${OUT_DIR-out}/target/common/obj/JAVA_LIBRARIES/core-tests_intermediates/classes.jack
-if [ ! -f $test_jar ]; then
+if [ ! -f $test_jack ]; then
echo "Before running, you must build core-tests, jsr166-tests and vogar: \
make core-tests jsr166-tests vogar vogar.jar"
exit 1
@@ -108,7 +108,11 @@ done
# the default timeout.
vogar_args="$vogar_args --timeout 480"
+# Use Jack with "1.8" configuration.
+export JACK_VERSION=`basename prebuilts/sdk/tools/jacks/*ALPHA* | sed 's/^jack-//' | sed 's/.jar$//'`
+vogar_args="$vogar_args --toolchain jack --language JN"
+
# Run the tests using vogar.
echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"
-vogar $vogar_args --vm-arg -Xusejit:true $expectations --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}
+vogar $vogar_args --vm-arg -Xusejit:true $expectations --classpath $jsr166_test_jack --classpath $test_jack ${working_packages[@]}