diff options
652 files changed, 21829 insertions, 5914 deletions
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk index 02bce411b4..2294ddbd55 100644 --- a/build/Android.common_build.mk +++ b/build/Android.common_build.mk @@ -114,8 +114,7 @@ ART_TARGET_CLANG := $(USE_CLANG_PLATFORM_BUILD) else ART_TARGET_CLANG := false endif -# b/25130937 -ART_TARGET_CLANG_arm := false +ART_TARGET_CLANG_arm := ART_TARGET_CLANG_arm64 := ART_TARGET_CLANG_mips := ART_TARGET_CLANG_mips64 := @@ -335,7 +334,6 @@ art_non_debug_cflags := \ art_debug_cflags := \ $(ART_DEBUG_OPT_FLAG) \ -DDYNAMIC_ANNOTATIONS_ENABLED=1 \ - -DVIXL_DEBUG \ -UNDEBUG art_host_non_debug_cflags := $(art_non_debug_cflags) diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index b3832ac3b6..33242f1c5d 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -517,7 +517,8 @@ $$(gtest_rule): $$(gtest_exe) $$(gtest_deps) valgrind-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps) $(ART_VALGRIND_DEPENDENCIES) $(hide) $$(call ART_TEST_SKIP,$$@) && \ VALGRIND_LIB=$(HOST_OUT)/lib64/valgrind \ - $(HOST_OUT_EXECUTABLES)/valgrind --leak-check=full --error-exitcode=1 $$< && \ + $(HOST_OUT_EXECUTABLES)/valgrind --leak-check=full --error-exitcode=1 \ + --suppressions=art/test/valgrind-suppressions.txt $$< && \ $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@) ART_TEST_HOST_VALGRIND_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += valgrind-$$(gtest_rule) @@ -573,7 +574,7 @@ define define-art-gtest ifeq ($$(art_target_or_host),target) $$(eval $$(call set-target-local-clang-vars)) $$(eval $$(call set-target-local-cflags-vars,debug)) - LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixld + LOCAL_SHARED_LIBRARIES += libdl libicuuc libicui18n libnativehelper libz libcutils libvixl LOCAL_MODULE_PATH_32 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_32) LOCAL_MODULE_PATH_64 := $$(ART_TARGET_NATIVETEST_OUT)/$$(ART_TARGET_ARCH_64) LOCAL_MULTILIB := both @@ -611,7 +612,7 @@ 
test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_ LOCAL_CLANG := $$(ART_HOST_CLANG) LOCAL_CFLAGS += $$(ART_HOST_CFLAGS) $$(ART_HOST_DEBUG_CFLAGS) LOCAL_ASFLAGS += $$(ART_HOST_ASFLAGS) - LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixld + LOCAL_SHARED_LIBRARIES += libicuuc-host libicui18n-host libnativehelper libziparchive-host libz-host libvixl LOCAL_LDLIBS := $(ART_HOST_LDLIBS) -lpthread -ldl LOCAL_IS_HOST_MODULE := true LOCAL_MULTILIB := both diff --git a/compiler/Android.mk b/compiler/Android.mk index 7a257b649f..11ee6dd3a1 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -330,9 +330,9 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT # Vixl assembly support for ARM64 targets. ifeq ($$(art_ndebug_or_debug),debug) ifeq ($$(art_static_or_shared), static) - LOCAL_WHOLESTATIC_LIBRARIES += libvixld + LOCAL_WHOLESTATIC_LIBRARIES += libvixl else - LOCAL_SHARED_LIBRARIES += libvixld + LOCAL_SHARED_LIBRARIES += libvixl endif else ifeq ($$(art_static_or_shared), static) diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc index 239bc590e9..6075cd6fbe 100644 --- a/compiler/common_compiler_test.cc +++ b/compiler/common_compiler_test.cc @@ -187,7 +187,9 @@ void CommonCompilerTest::SetUp() { } } -void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa) { +void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, + InstructionSet isa, + size_t number_of_threads) { compiler_driver_.reset(new CompilerDriver(compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(), @@ -198,7 +200,7 @@ void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, InstructionSe GetImageClasses(), GetCompiledClasses(), GetCompiledMethods(), - /* thread_count */ 2, + number_of_threads, /* dump_stats */ true, /* dump_passes */ true, timer_.get(), diff --git 
a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h index 7e0fbabff8..7c2c844e6f 100644 --- a/compiler/common_compiler_test.h +++ b/compiler/common_compiler_test.h @@ -93,7 +93,7 @@ class CommonCompilerTest : public CommonRuntimeTest { const char* method_name, const char* signature) SHARED_REQUIRES(Locks::mutator_lock_); - void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa); + void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa, size_t number_of_threads = 2U); void ReserveImageSpace(); @@ -122,6 +122,13 @@ class CommonCompilerTest : public CommonRuntimeTest { return; \ } +// TODO: When read barrier works with all tests, get rid of this. +#define TEST_DISABLED_FOR_READ_BARRIER() \ + if (kUseReadBarrier) { \ + printf("WARNING: TEST DISABLED FOR READ BARRIER\n"); \ + return; \ + } + // TODO: When read barrier works with all compilers in use, get rid of this. #define TEST_DISABLED_FOR_READ_BARRIER_WITH_QUICK() \ if (kUseReadBarrier && GetCompilerKind() == Compiler::kQuick) { \ diff --git a/compiler/debug/dwarf/debug_line_opcode_writer.h b/compiler/debug/dwarf/debug_line_opcode_writer.h index 58502a3f9c..b4a4d63f01 100644 --- a/compiler/debug/dwarf/debug_line_opcode_writer.h +++ b/compiler/debug/dwarf/debug_line_opcode_writer.h @@ -36,7 +36,7 @@ class DebugLineOpCodeWriter FINAL : private Writer<Vector> { public: static constexpr int kOpcodeBase = 13; - static constexpr bool kDefaultIsStmt = true; + static constexpr bool kDefaultIsStmt = false; static constexpr int kLineBase = -5; static constexpr int kLineRange = 14; @@ -81,8 +81,11 @@ class DebugLineOpCodeWriter FINAL : private Writer<Vector> { this->PushUleb128(column); } - void NegateStmt() { - this->PushUint8(DW_LNS_negate_stmt); + void SetIsStmt(bool is_stmt) { + if (is_stmt_ != is_stmt) { + this->PushUint8(DW_LNS_negate_stmt); + is_stmt_ = is_stmt; + } } void SetBasicBlock() { @@ -112,6 +115,7 @@ class DebugLineOpCodeWriter FINAL : private Writer<Vector> { 
current_address_ = 0; current_file_ = 1; current_line_ = 1; + is_stmt_ = kDefaultIsStmt; } // Uncoditionally set address using the long encoding. @@ -227,7 +231,8 @@ class DebugLineOpCodeWriter FINAL : private Writer<Vector> { code_factor_bits_(codeFactorBits), current_address_(0), current_file_(1), - current_line_(1) { + current_line_(1), + is_stmt_(kDefaultIsStmt) { } private: @@ -244,6 +249,7 @@ class DebugLineOpCodeWriter FINAL : private Writer<Vector> { uint64_t current_address_; int current_file_; int current_line_; + bool is_stmt_; std::vector<uintptr_t> patch_locations_; DISALLOW_COPY_AND_ASSIGN(DebugLineOpCodeWriter); diff --git a/compiler/debug/dwarf/dwarf_test.cc b/compiler/debug/dwarf/dwarf_test.cc index e455d0d617..2ba3af5e10 100644 --- a/compiler/debug/dwarf/dwarf_test.cc +++ b/compiler/debug/dwarf/dwarf_test.cc @@ -217,7 +217,9 @@ TEST_F(DwarfTest, DebugLine) { DW_CHECK_NEXT("Advance Line by 2 to 3"); opcodes.SetColumn(4); DW_CHECK_NEXT("Set column to 4"); - opcodes.NegateStmt(); + opcodes.SetIsStmt(true); + DW_CHECK_NEXT("Set is_stmt to 1"); + opcodes.SetIsStmt(false); DW_CHECK_NEXT("Set is_stmt to 0"); opcodes.SetBasicBlock(); DW_CHECK_NEXT("Set basic block"); diff --git a/compiler/debug/dwarf/dwarf_test.h b/compiler/debug/dwarf/dwarf_test.h index 41bfe79c21..e2f0a65ab7 100644 --- a/compiler/debug/dwarf/dwarf_test.h +++ b/compiler/debug/dwarf/dwarf_test.h @@ -62,7 +62,7 @@ class DwarfTest : public CommonRuntimeTest { InstructionSet isa = (sizeof(typename ElfTypes::Addr) == 8) ? 
kX86_64 : kX86; ScratchFile file; FileOutputStream output_stream(file.GetFile()); - ElfBuilder<ElfTypes> builder(isa, &output_stream); + ElfBuilder<ElfTypes> builder(isa, nullptr, &output_stream); builder.Start(); if (!debug_info_data_.empty()) { builder.WriteSection(".debug_info", &debug_info_data_); diff --git a/compiler/debug/elf_compilation_unit.h b/compiler/debug/elf_compilation_unit.h index f725f45e15..b1d89ebeb2 100644 --- a/compiler/debug/elf_compilation_unit.h +++ b/compiler/debug/elf_compilation_unit.h @@ -27,8 +27,9 @@ namespace debug { struct ElfCompilationUnit { std::vector<const MethodDebugInfo*> methods; size_t debug_line_offset = 0; - uintptr_t low_pc = std::numeric_limits<uintptr_t>::max(); - uintptr_t high_pc = 0; + bool is_code_address_text_relative; // Is the address offset from start of .text section? + uint64_t code_address = std::numeric_limits<uint64_t>::max(); + uint64_t code_end = 0; }; } // namespace debug diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h index f6d9b169c4..f9d33c1c30 100644 --- a/compiler/debug/elf_debug_frame_writer.h +++ b/compiler/debug/elf_debug_frame_writer.h @@ -175,18 +175,6 @@ void WriteCFISection(ElfBuilder<ElfTypes>* builder, CHECK(format == dwarf::DW_DEBUG_FRAME_FORMAT || format == dwarf::DW_EH_FRAME_FORMAT); typedef typename ElfTypes::Addr Elf_Addr; - if (method_infos.empty()) { - return; - } - - std::vector<uint32_t> binary_search_table; - std::vector<uintptr_t> patch_locations; - if (format == dwarf::DW_EH_FRAME_FORMAT) { - binary_search_table.reserve(2 * method_infos.size()); - } else { - patch_locations.reserve(method_infos.size()); - } - // The methods can be written in any order. // Let's therefore sort them in the lexicographical order of the opcodes. // This has no effect on its own. 
However, if the final .debug_frame section is @@ -194,17 +182,30 @@ void WriteCFISection(ElfBuilder<ElfTypes>* builder, std::vector<const MethodDebugInfo*> sorted_method_infos; sorted_method_infos.reserve(method_infos.size()); for (size_t i = 0; i < method_infos.size(); i++) { - sorted_method_infos.push_back(&method_infos[i]); + if (!method_infos[i].cfi.empty() && !method_infos[i].deduped) { + sorted_method_infos.push_back(&method_infos[i]); + } } - std::sort( + if (sorted_method_infos.empty()) { + return; + } + std::stable_sort( sorted_method_infos.begin(), sorted_method_infos.end(), [](const MethodDebugInfo* lhs, const MethodDebugInfo* rhs) { - ArrayRef<const uint8_t> l = lhs->compiled_method->GetCFIInfo(); - ArrayRef<const uint8_t> r = rhs->compiled_method->GetCFIInfo(); + ArrayRef<const uint8_t> l = lhs->cfi; + ArrayRef<const uint8_t> r = rhs->cfi; return std::lexicographical_compare(l.begin(), l.end(), r.begin(), r.end()); }); + std::vector<uint32_t> binary_search_table; + std::vector<uintptr_t> patch_locations; + if (format == dwarf::DW_EH_FRAME_FORMAT) { + binary_search_table.reserve(2 * sorted_method_infos.size()); + } else { + patch_locations.reserve(sorted_method_infos.size()); + } + // Write .eh_frame/.debug_frame section. auto* cfi_section = (format == dwarf::DW_DEBUG_FRAME_FORMAT ? builder->GetDebugFrame() @@ -212,9 +213,6 @@ void WriteCFISection(ElfBuilder<ElfTypes>* builder, { cfi_section->Start(); const bool is64bit = Is64BitInstructionSet(builder->GetIsa()); - const Elf_Addr text_address = builder->GetText()->Exists() - ? builder->GetText()->GetAddress() - : 0; const Elf_Addr cfi_address = cfi_section->GetAddress(); const Elf_Addr cie_address = cfi_address; Elf_Addr buffer_address = cfi_address; @@ -224,25 +222,21 @@ void WriteCFISection(ElfBuilder<ElfTypes>* builder, buffer_address += buffer.size(); buffer.clear(); for (const MethodDebugInfo* mi : sorted_method_infos) { - if (!mi->deduped) { // Only one FDE per unique address. 
- ArrayRef<const uint8_t> opcodes = mi->compiled_method->GetCFIInfo(); - if (!opcodes.empty()) { - const Elf_Addr code_address = text_address + mi->low_pc; - if (format == dwarf::DW_EH_FRAME_FORMAT) { - binary_search_table.push_back( - dchecked_integral_cast<uint32_t>(code_address)); - binary_search_table.push_back( - dchecked_integral_cast<uint32_t>(buffer_address)); - } - WriteFDE(is64bit, cfi_address, cie_address, - code_address, mi->high_pc - mi->low_pc, - opcodes, format, buffer_address, &buffer, - &patch_locations); - cfi_section->WriteFully(buffer.data(), buffer.size()); - buffer_address += buffer.size(); - buffer.clear(); - } + DCHECK(!mi->deduped); + DCHECK(!mi->cfi.empty()); + const Elf_Addr code_address = mi->code_address + + (mi->is_code_address_text_relative ? builder->GetText()->GetAddress() : 0); + if (format == dwarf::DW_EH_FRAME_FORMAT) { + binary_search_table.push_back(dchecked_integral_cast<uint32_t>(code_address)); + binary_search_table.push_back(dchecked_integral_cast<uint32_t>(buffer_address)); } + WriteFDE(is64bit, cfi_address, cie_address, + code_address, mi->code_size, + mi->cfi, format, buffer_address, &buffer, + &patch_locations); + cfi_section->WriteFully(buffer.data(), buffer.size()); + buffer_address += buffer.size(); + buffer.clear(); } cfi_section->End(); } diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h index eed032f88d..a6e6f8b5da 100644 --- a/compiler/debug/elf_debug_info_writer.h +++ b/compiler/debug/elf_debug_info_writer.h @@ -46,6 +46,7 @@ static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) { static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) { std::vector<const char*> names; if (mi->code_item != nullptr) { + DCHECK(mi->dex_file != nullptr); const uint8_t* stream = mi->dex_file->GetDebugInfoStream(mi->code_item); if (stream != nullptr) { DecodeUnsignedLeb128(&stream); // line. 
@@ -117,22 +118,23 @@ class ElfCompilationUnitWriter { void Write(const ElfCompilationUnit& compilation_unit) { CHECK(!compilation_unit.methods.empty()); - const Elf_Addr text_address = owner_->builder_->GetText()->Exists() + const Elf_Addr base_address = compilation_unit.is_code_address_text_relative ? owner_->builder_->GetText()->GetAddress() : 0; - const uintptr_t cu_size = compilation_unit.high_pc - compilation_unit.low_pc; + const uint64_t cu_size = compilation_unit.code_end - compilation_unit.code_address; using namespace dwarf; // NOLINT. For easy access to DWARF constants. info_.StartTag(DW_TAG_compile_unit); info_.WriteString(DW_AT_producer, "Android dex2oat"); info_.WriteData1(DW_AT_language, DW_LANG_Java); info_.WriteString(DW_AT_comp_dir, "$JAVA_SRC_ROOT"); - info_.WriteAddr(DW_AT_low_pc, text_address + compilation_unit.low_pc); + info_.WriteAddr(DW_AT_low_pc, base_address + compilation_unit.code_address); info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(cu_size)); info_.WriteSecOffset(DW_AT_stmt_list, compilation_unit.debug_line_offset); const char* last_dex_class_desc = nullptr; for (auto mi : compilation_unit.methods) { + DCHECK(mi->dex_file != nullptr); const DexFile* dex = mi->dex_file; const DexFile::CodeItem* dex_code = mi->code_item; const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index); @@ -165,14 +167,27 @@ class ElfCompilationUnitWriter { int start_depth = info_.Depth(); info_.StartTag(DW_TAG_subprogram); WriteName(dex->GetMethodName(dex_method)); - info_.WriteAddr(DW_AT_low_pc, text_address + mi->low_pc); - info_.WriteUdata(DW_AT_high_pc, dchecked_integral_cast<uint32_t>(mi->high_pc-mi->low_pc)); + info_.WriteAddr(DW_AT_low_pc, base_address + mi->code_address); + info_.WriteUdata(DW_AT_high_pc, mi->code_size); std::vector<uint8_t> expr_buffer; Expression expr(&expr_buffer); expr.WriteOpCallFrameCfa(); info_.WriteExprLoc(DW_AT_frame_base, expr); WriteLazyType(dex->GetReturnTypeDescriptor(dex_proto)); + 
// Decode dex register locations for all stack maps. + // It might be expensive, so do it just once and reuse the result. + std::vector<DexRegisterMap> dex_reg_maps; + if (mi->code_info != nullptr) { + const CodeInfo code_info(mi->code_info); + StackMapEncoding encoding = code_info.ExtractEncoding(); + for (size_t s = 0; s < code_info.GetNumberOfStackMaps(); ++s) { + const StackMap& stack_map = code_info.GetStackMapAt(s, encoding); + dex_reg_maps.push_back(code_info.GetDexRegisterMapOf( + stack_map, encoding, dex_code->registers_size_)); + } + } + // Write parameters. DecodeDebugLocalInfo returns them as well, but it does not // guarantee order or uniqueness so it is safer to iterate over them manually. // DecodeDebugLocalInfo might not also be available if there is no debug info. @@ -187,7 +202,7 @@ class ElfCompilationUnitWriter { // Write the stack location of the parameter. const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg; const bool is64bitValue = false; - WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc); + WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address); } arg_reg++; info_.EndTag(); @@ -206,7 +221,7 @@ class ElfCompilationUnitWriter { if (dex_code != nullptr) { // Write the stack location of the parameter. const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg; - WriteRegLocation(mi, vreg, is64bitValue, compilation_unit.low_pc); + WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address); } arg_reg += is64bitValue ? 
2 : 1; info_.EndTag(); @@ -229,8 +244,13 @@ class ElfCompilationUnitWriter { WriteName(var.name_); WriteLazyType(var.descriptor_); bool is64bitValue = var.descriptor_[0] == 'D' || var.descriptor_[0] == 'J'; - WriteRegLocation(mi, var.reg_, is64bitValue, compilation_unit.low_pc, - var.start_address_, var.end_address_); + WriteRegLocation(mi, + dex_reg_maps, + var.reg_, + is64bitValue, + compilation_unit.code_address, + var.start_address_, + var.end_address_); info_.EndTag(); } } @@ -424,15 +444,17 @@ class ElfCompilationUnitWriter { // The dex register might be valid only at some points and it might // move between machine registers and stack. void WriteRegLocation(const MethodDebugInfo* method_info, + const std::vector<DexRegisterMap>& dex_register_maps, uint16_t vreg, bool is64bitValue, - uint32_t compilation_unit_low_pc, + uint64_t compilation_unit_code_address, uint32_t dex_pc_low = 0, uint32_t dex_pc_high = 0xFFFFFFFF) { WriteDebugLocEntry(method_info, + dex_register_maps, vreg, is64bitValue, - compilation_unit_low_pc, + compilation_unit_code_address, dex_pc_low, dex_pc_high, owner_->builder_->GetIsa(), diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h index d3859ca752..66e135f395 100644 --- a/compiler/debug/elf_debug_line_writer.h +++ b/compiler/debug/elf_debug_line_writer.h @@ -17,6 +17,7 @@ #ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_ #define ART_COMPILER_DEBUG_ELF_DEBUG_LINE_WRITER_H_ +#include <unordered_set> #include <vector> #include "compiled_method.h" @@ -53,7 +54,7 @@ class ElfDebugLineWriter { // Returns the number of bytes written. size_t WriteCompilationUnit(ElfCompilationUnit& compilation_unit) { const bool is64bit = Is64BitInstructionSet(builder_->GetIsa()); - const Elf_Addr text_address = builder_->GetText()->Exists() + const Elf_Addr base_address = compilation_unit.is_code_address_text_relative ? 
builder_->GetText()->GetAddress() : 0; @@ -81,48 +82,80 @@ class ElfDebugLineWriter { case kX86_64: break; } + std::unordered_set<uint64_t> seen_addresses(compilation_unit.methods.size()); dwarf::DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits_); for (const MethodDebugInfo* mi : compilation_unit.methods) { // Ignore function if we have already generated line table for the same address. // It would confuse the debugger and the DWARF specification forbids it. - if (mi->deduped) { + // We allow the line table for method to be replicated in different compilation unit. + // This ensures that each compilation unit contains line table for all its methods. + if (!seen_addresses.insert(mi->code_address).second) { continue; } uint32_t prologue_end = std::numeric_limits<uint32_t>::max(); - ArrayRef<const SrcMapElem> pc2dex_map; - std::vector<SrcMapElem> pc2dex_map_from_stack_maps; - if (mi->IsFromOptimizingCompiler()) { + std::vector<SrcMapElem> pc2dex_map; + if (mi->code_info != nullptr) { // Use stack maps to create mapping table from pc to dex. - const CodeInfo code_info(mi->compiled_method->GetVmapTable().data()); + const CodeInfo code_info(mi->code_info); const StackMapEncoding encoding = code_info.ExtractEncoding(); + pc2dex_map.reserve(code_info.GetNumberOfStackMaps()); for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) { StackMap stack_map = code_info.GetStackMapAt(s, encoding); DCHECK(stack_map.IsValid()); const uint32_t pc = stack_map.GetNativePcOffset(encoding); const int32_t dex = stack_map.GetDexPc(encoding); - pc2dex_map_from_stack_maps.push_back({pc, dex}); + pc2dex_map.push_back({pc, dex}); if (stack_map.HasDexRegisterMap(encoding)) { // Guess that the first map with local variables is the end of prologue. 
prologue_end = std::min(prologue_end, pc); } } - std::sort(pc2dex_map_from_stack_maps.begin(), - pc2dex_map_from_stack_maps.end()); - pc2dex_map = ArrayRef<const SrcMapElem>(pc2dex_map_from_stack_maps); - } else { - // Use the mapping table provided by the quick compiler. - pc2dex_map = mi->compiled_method->GetSrcMappingTable(); - prologue_end = 0; + std::sort(pc2dex_map.begin(), pc2dex_map.end()); } if (pc2dex_map.empty()) { continue; } - Elf_Addr method_address = text_address + mi->low_pc; + // Compensate for compiler's off-by-one-instruction error. + // + // The compiler generates stackmap with PC *after* the branch instruction + // (because this is the PC which is easier to obtain when unwinding). + // + // However, the debugger is more clever and it will ask us for line-number + // mapping at the location of the branch instruction (since the following + // instruction could belong to other line, this is the correct thing to do). + // + // So we really want to just decrement the PC by one instruction so that the + // branch instruction is covered as well. However, we do not know the size + // of the previous instruction, and we can not subtract just a fixed amount + // (the debugger would trust us that the PC is valid; it might try to set + // breakpoint there at some point, and setting breakpoint in mid-instruction + // would make the process crash in spectacular way). + // + // Therefore, we say that the PC which the compiler gave us for the stackmap + // is the end of its associated address range, and we use the PC from the + // previous stack map as the start of the range. This ensures that the PC is + // valid and that the branch instruction is covered. + // + // This ensures we have correct line number mapping at call sites (which is + // important for backtraces), but there is nothing we can do for non-call + // sites (so stepping through optimized code in debugger is not possible). 
+ // + // We do not adjust the stackmaps if the code was compiled as debuggable. + // In that case, the stackmaps should accurately cover all instructions. + if (!mi->is_native_debuggable) { + for (size_t i = pc2dex_map.size() - 1; i > 0; --i) { + pc2dex_map[i].from_ = pc2dex_map[i - 1].from_; + } + pc2dex_map[0].from_ = 0; + } + + Elf_Addr method_address = base_address + mi->code_address; PositionInfos dex2line_map; + DCHECK(mi->dex_file != nullptr); const DexFile* dex = mi->dex_file; if (!dex->DecodeDebugPositionInfo(mi->code_item, PositionInfoCallback, &dex2line_map)) { continue; @@ -184,6 +217,10 @@ class ElfDebugLineWriter { // Generate mapping opcodes from PC to Java lines. if (file_index != 0) { + // If the method was not compiled as native-debuggable, we still generate all available + // lines, but we try to prevent the debugger from stepping and setting breakpoints since + // the information is too inaccurate for that (breakpoints would be set after the calls). + const bool default_is_stmt = mi->is_native_debuggable; bool first = true; for (SrcMapElem pc2dex : pc2dex_map) { uint32_t pc = pc2dex.from_; @@ -205,13 +242,14 @@ class ElfDebugLineWriter { // Assume that any preceding code is prologue. int first_line = dex2line_map.front().line_; // Prologue is not a sensible place for a breakpoint. 
- opcodes.NegateStmt(); + opcodes.SetIsStmt(false); opcodes.AddRow(method_address, first_line); - opcodes.NegateStmt(); opcodes.SetPrologueEnd(); } + opcodes.SetIsStmt(default_is_stmt); opcodes.AddRow(method_address + pc, line); } else if (line != opcodes.CurrentLine()) { + opcodes.SetIsStmt(default_is_stmt); opcodes.AddRow(method_address + pc, line); } } @@ -221,7 +259,7 @@ class ElfDebugLineWriter { opcodes.AddRow(method_address, 0); } - opcodes.AdvancePC(text_address + mi->high_pc); + opcodes.AdvancePC(method_address + mi->code_size); opcodes.EndSequence(); } std::vector<uint8_t> buffer; diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h index 32f624acd3..2d4fff4d14 100644 --- a/compiler/debug/elf_debug_loc_writer.h +++ b/compiler/debug/elf_debug_loc_writer.h @@ -74,8 +74,8 @@ static Reg GetDwarfFpReg(InstructionSet isa, int machine_reg) { } struct VariableLocation { - uint32_t low_pc; - uint32_t high_pc; + uint32_t low_pc; // Relative to compilation unit. + uint32_t high_pc; // Relative to compilation unit. DexRegisterLocation reg_lo; // May be None if the location is unknown. DexRegisterLocation reg_hi; // Most significant bits of 64-bit value. }; @@ -85,33 +85,41 @@ struct VariableLocation { // The result will cover all ranges where the variable is in scope. // PCs corresponding to stackmap with dex register map are accurate, // all other PCs are best-effort only. 
-std::vector<VariableLocation> GetVariableLocations(const MethodDebugInfo* method_info, - uint16_t vreg, - bool is64bitValue, - uint32_t dex_pc_low, - uint32_t dex_pc_high) { +std::vector<VariableLocation> GetVariableLocations( + const MethodDebugInfo* method_info, + const std::vector<DexRegisterMap>& dex_register_maps, + uint16_t vreg, + bool is64bitValue, + uint64_t compilation_unit_code_address, + uint32_t dex_pc_low, + uint32_t dex_pc_high) { std::vector<VariableLocation> variable_locations; // Get stack maps sorted by pc (they might not be sorted internally). - const CodeInfo code_info(method_info->compiled_method->GetVmapTable().data()); + const CodeInfo code_info(method_info->code_info); const StackMapEncoding encoding = code_info.ExtractEncoding(); - std::map<uint32_t, StackMap> stack_maps; + std::map<uint32_t, uint32_t> stack_maps; // low_pc -> stack_map_index. for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) { StackMap stack_map = code_info.GetStackMapAt(s, encoding); DCHECK(stack_map.IsValid()); - const uint32_t low_pc = method_info->low_pc + stack_map.GetNativePcOffset(encoding); - DCHECK_LE(low_pc, method_info->high_pc); - stack_maps.emplace(low_pc, stack_map); + const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding); + DCHECK_LE(pc_offset, method_info->code_size); + DCHECK_LE(compilation_unit_code_address, method_info->code_address); + const uint32_t low_pc = dchecked_integral_cast<uint32_t>( + method_info->code_address + pc_offset - compilation_unit_code_address); + stack_maps.emplace(low_pc, s); } // Create entries for the requested register based on stack map data. for (auto it = stack_maps.begin(); it != stack_maps.end(); it++) { - const StackMap& stack_map = it->second; const uint32_t low_pc = it->first; + const uint32_t stack_map_index = it->second; + const StackMap& stack_map = code_info.GetStackMapAt(stack_map_index, encoding); auto next_it = it; next_it++; - const uint32_t high_pc = next_it != stack_maps.end() ? 
next_it->first - : method_info->high_pc; + const uint32_t high_pc = next_it != stack_maps.end() + ? next_it->first + : method_info->code_address + method_info->code_size - compilation_unit_code_address; DCHECK_LE(low_pc, high_pc); if (low_pc == high_pc) { continue; // Ignore if the address range is empty. @@ -126,9 +134,9 @@ std::vector<VariableLocation> GetVariableLocations(const MethodDebugInfo* method // Find the location of the dex register. DexRegisterLocation reg_lo = DexRegisterLocation::None(); DexRegisterLocation reg_hi = DexRegisterLocation::None(); - if (stack_map.HasDexRegisterMap(encoding)) { - DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf( - stack_map, encoding, method_info->code_item->registers_size_); + DCHECK_LT(stack_map_index, dex_register_maps.size()); + DexRegisterMap dex_register_map = dex_register_maps[stack_map_index]; + if (dex_register_map.IsValid()) { reg_lo = dex_register_map.GetDexRegisterLocation( vreg, method_info->code_item->registers_size_, code_info, encoding); if (is64bitValue) { @@ -159,9 +167,10 @@ std::vector<VariableLocation> GetVariableLocations(const MethodDebugInfo* method // The dex register might be valid only at some points and it might // move between machine registers and stack. 
static void WriteDebugLocEntry(const MethodDebugInfo* method_info, + const std::vector<DexRegisterMap>& dex_register_maps, uint16_t vreg, bool is64bitValue, - uint32_t compilation_unit_low_pc, + uint64_t compilation_unit_code_address, uint32_t dex_pc_low, uint32_t dex_pc_high, InstructionSet isa, @@ -169,14 +178,16 @@ static void WriteDebugLocEntry(const MethodDebugInfo* method_info, std::vector<uint8_t>* debug_loc_buffer, std::vector<uint8_t>* debug_ranges_buffer) { using Kind = DexRegisterLocation::Kind; - if (!method_info->IsFromOptimizingCompiler()) { + if (method_info->code_info == nullptr || dex_register_maps.empty()) { return; } std::vector<VariableLocation> variable_locations = GetVariableLocations( method_info, + dex_register_maps, vreg, is64bitValue, + compilation_unit_code_address, dex_pc_low, dex_pc_high); @@ -197,9 +208,8 @@ static void WriteDebugLocEntry(const MethodDebugInfo* method_info, const Kind kind = reg_loc.GetKind(); const int32_t value = reg_loc.GetValue(); if (kind == Kind::kInStack) { - const size_t frame_size = method_info->compiled_method->GetFrameSizeInBytes(); // The stack offset is relative to SP. Make it relative to CFA. - expr.WriteOpFbreg(value - frame_size); + expr.WriteOpFbreg(value - method_info->frame_size_in_bytes); if (piece == 0 && reg_hi.GetKind() == Kind::kInStack && reg_hi.GetValue() == value + 4) { break; // the high word is correctly implied by the low word. 
@@ -244,11 +254,11 @@ static void WriteDebugLocEntry(const MethodDebugInfo* method_info, if (expr.size() > 0) { if (is64bit) { - debug_loc.PushUint64(variable_location.low_pc - compilation_unit_low_pc); - debug_loc.PushUint64(variable_location.high_pc - compilation_unit_low_pc); + debug_loc.PushUint64(variable_location.low_pc); + debug_loc.PushUint64(variable_location.high_pc); } else { - debug_loc.PushUint32(variable_location.low_pc - compilation_unit_low_pc); - debug_loc.PushUint32(variable_location.high_pc - compilation_unit_low_pc); + debug_loc.PushUint32(variable_location.low_pc); + debug_loc.PushUint32(variable_location.high_pc); } // Write the expression. debug_loc.PushUint16(expr.size()); @@ -278,11 +288,11 @@ static void WriteDebugLocEntry(const MethodDebugInfo* method_info, high_pc = variable_locations[++i].high_pc; } if (is64bit) { - debug_ranges.PushUint64(low_pc - compilation_unit_low_pc); - debug_ranges.PushUint64(high_pc - compilation_unit_low_pc); + debug_ranges.PushUint64(low_pc); + debug_ranges.PushUint64(high_pc); } else { - debug_ranges.PushUint32(low_pc - compilation_unit_low_pc); - debug_ranges.PushUint32(high_pc - compilation_unit_low_pc); + debug_ranges.PushUint32(low_pc); + debug_ranges.PushUint32(high_pc); } } // Write end-of-list entry. diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc index 01bd6797c9..4dd802495c 100644 --- a/compiler/debug/elf_debug_writer.cc +++ b/compiler/debug/elf_debug_writer.cc @@ -39,32 +39,31 @@ void WriteDebugInfo(ElfBuilder<ElfTypes>* builder, const ArrayRef<const MethodDebugInfo>& method_infos, dwarf::CFIFormat cfi_format, bool write_oat_patches) { - // Add methods to .symtab. + // Write .strtab and .symtab. WriteDebugSymbols(builder, method_infos, true /* with_signature */); - // Generate CFI (stack unwinding information). + + // Write .debug_frame. WriteCFISection(builder, method_infos, cfi_format, write_oat_patches); - // Write DWARF .debug_* sections. 
- WriteDebugSections(builder, method_infos, write_oat_patches); -} -template<typename ElfTypes> -static void WriteDebugSections(ElfBuilder<ElfTypes>* builder, - const ArrayRef<const MethodDebugInfo>& method_infos, - bool write_oat_patches) { // Group the methods into compilation units based on source file. std::vector<ElfCompilationUnit> compilation_units; const char* last_source_file = nullptr; for (const MethodDebugInfo& mi : method_infos) { - auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index); - const char* source_file = mi.dex_file->GetSourceFile(dex_class_def); - if (compilation_units.empty() || source_file != last_source_file) { - compilation_units.push_back(ElfCompilationUnit()); + if (mi.dex_file != nullptr) { + auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index); + const char* source_file = mi.dex_file->GetSourceFile(dex_class_def); + if (compilation_units.empty() || source_file != last_source_file) { + compilation_units.push_back(ElfCompilationUnit()); + } + ElfCompilationUnit& cu = compilation_units.back(); + cu.methods.push_back(&mi); + // All methods must have the same addressing mode otherwise the min/max below does not work. + DCHECK_EQ(cu.methods.front()->is_code_address_text_relative, mi.is_code_address_text_relative); + cu.is_code_address_text_relative = mi.is_code_address_text_relative; + cu.code_address = std::min(cu.code_address, mi.code_address); + cu.code_end = std::max(cu.code_end, mi.code_address + mi.code_size); + last_source_file = source_file; } - ElfCompilationUnit& cu = compilation_units.back(); - cu.methods.push_back(&mi); - cu.low_pc = std::min(cu.low_pc, mi.low_pc); - cu.high_pc = std::max(cu.high_pc, mi.high_pc); - last_source_file = source_file; } // Write .debug_line section. 
@@ -91,28 +90,38 @@ static void WriteDebugSections(ElfBuilder<ElfTypes>* builder, std::vector<uint8_t> MakeMiniDebugInfo( InstructionSet isa, + const InstructionSetFeatures* features, size_t rodata_size, size_t text_size, const ArrayRef<const MethodDebugInfo>& method_infos) { if (Is64BitInstructionSet(isa)) { - return MakeMiniDebugInfoInternal<ElfTypes64>(isa, rodata_size, text_size, method_infos); + return MakeMiniDebugInfoInternal<ElfTypes64>(isa, + features, + rodata_size, + text_size, + method_infos); } else { - return MakeMiniDebugInfoInternal<ElfTypes32>(isa, rodata_size, text_size, method_infos); + return MakeMiniDebugInfoInternal<ElfTypes32>(isa, + features, + rodata_size, + text_size, + method_infos); } } template <typename ElfTypes> -static ArrayRef<const uint8_t> WriteDebugElfFileForMethodInternal( - const MethodDebugInfo& method_info) { - const InstructionSet isa = method_info.compiled_method->GetInstructionSet(); +static ArrayRef<const uint8_t> WriteDebugElfFileForMethodsInternal( + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef<const MethodDebugInfo>& method_infos) { std::vector<uint8_t> buffer; buffer.reserve(KB); VectorOutputStream out("Debug ELF file", &buffer); - std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out)); + std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, features, &out)); // No program headers since the ELF file is not linked and has no allocated sections. 
builder->Start(false /* write_program_headers */); WriteDebugInfo(builder.get(), - ArrayRef<const MethodDebugInfo>(&method_info, 1), + method_infos, dwarf::DW_DEBUG_FRAME_FORMAT, false /* write_oat_patches */); builder->End(); @@ -124,23 +133,27 @@ static ArrayRef<const uint8_t> WriteDebugElfFileForMethodInternal( return ArrayRef<const uint8_t>(result, buffer.size()); } -ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const MethodDebugInfo& method_info) { - const InstructionSet isa = method_info.compiled_method->GetInstructionSet(); +ArrayRef<const uint8_t> WriteDebugElfFileForMethods( + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef<const MethodDebugInfo>& method_infos) { if (Is64BitInstructionSet(isa)) { - return WriteDebugElfFileForMethodInternal<ElfTypes64>(method_info); + return WriteDebugElfFileForMethodsInternal<ElfTypes64>(isa, features, method_infos); } else { - return WriteDebugElfFileForMethodInternal<ElfTypes32>(method_info); + return WriteDebugElfFileForMethodsInternal<ElfTypes32>(isa, features, method_infos); } } template <typename ElfTypes> static ArrayRef<const uint8_t> WriteDebugElfFileForClassesInternal( - const InstructionSet isa, const ArrayRef<mirror::Class*>& types) + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) { std::vector<uint8_t> buffer; buffer.reserve(KB); VectorOutputStream out("Debug ELF file", &buffer); - std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out)); + std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, features, &out)); // No program headers since the ELF file is not linked and has no allocated sections. 
builder->Start(false /* write_program_headers */); ElfDebugInfoWriter<ElfTypes> info_writer(builder.get()); @@ -158,13 +171,39 @@ static ArrayRef<const uint8_t> WriteDebugElfFileForClassesInternal( return ArrayRef<const uint8_t>(result, buffer.size()); } -ArrayRef<const uint8_t> WriteDebugElfFileForClasses(const InstructionSet isa, +ArrayRef<const uint8_t> WriteDebugElfFileForClasses(InstructionSet isa, + const InstructionSetFeatures* features, const ArrayRef<mirror::Class*>& types) { if (Is64BitInstructionSet(isa)) { - return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, types); + return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, features, types); } else { - return WriteDebugElfFileForClassesInternal<ElfTypes32>(isa, types); + return WriteDebugElfFileForClassesInternal<ElfTypes32>(isa, features, types); + } +} + +std::vector<MethodDebugInfo> MakeTrampolineInfos(const OatHeader& header) { + std::map<const char*, uint32_t> trampolines = { + { "interpreterToInterpreterBridge", header.GetInterpreterToInterpreterBridgeOffset() }, + { "interpreterToCompiledCodeBridge", header.GetInterpreterToCompiledCodeBridgeOffset() }, + { "jniDlsymLookup", header.GetJniDlsymLookupOffset() }, + { "quickGenericJniTrampoline", header.GetQuickGenericJniTrampolineOffset() }, + { "quickImtConflictTrampoline", header.GetQuickImtConflictTrampolineOffset() }, + { "quickResolutionTrampoline", header.GetQuickResolutionTrampolineOffset() }, + { "quickToInterpreterBridge", header.GetQuickToInterpreterBridgeOffset() }, + }; + std::vector<MethodDebugInfo> result; + for (const auto& it : trampolines) { + if (it.second != 0) { + MethodDebugInfo info = MethodDebugInfo(); + info.trampoline_name = it.first; + info.isa = header.GetInstructionSet(); + info.is_code_address_text_relative = true; + info.code_address = it.second - header.GetExecutableOffset(); + info.code_size = 0; // The symbol lasts until the next symbol. 
+ result.push_back(std::move(info)); + } } + return result; } // Explicit instantiations diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h index 103b501489..736370e2d3 100644 --- a/compiler/debug/elf_debug_writer.h +++ b/compiler/debug/elf_debug_writer.h @@ -17,6 +17,8 @@ #ifndef ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ #define ART_COMPILER_DEBUG_ELF_DEBUG_WRITER_H_ +#include <vector> + #include "base/macros.h" #include "base/mutex.h" #include "debug/dwarf/dwarf_constants.h" @@ -24,6 +26,7 @@ #include "utils/array_ref.h" namespace art { +class OatHeader; namespace mirror { class Class; } @@ -31,22 +34,32 @@ namespace debug { struct MethodDebugInfo; template <typename ElfTypes> -void WriteDebugInfo(ElfBuilder<ElfTypes>* builder, - const ArrayRef<const MethodDebugInfo>& method_infos, - dwarf::CFIFormat cfi_format, - bool write_oat_patches); +void WriteDebugInfo( + ElfBuilder<ElfTypes>* builder, + const ArrayRef<const MethodDebugInfo>& method_infos, + dwarf::CFIFormat cfi_format, + bool write_oat_patches); -std::vector<uint8_t> MakeMiniDebugInfo(InstructionSet isa, - size_t rodata_section_size, - size_t text_section_size, - const ArrayRef<const MethodDebugInfo>& method_infos); +std::vector<uint8_t> MakeMiniDebugInfo( + InstructionSet isa, + const InstructionSetFeatures* features, + size_t rodata_section_size, + size_t text_section_size, + const ArrayRef<const MethodDebugInfo>& method_infos); -ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const MethodDebugInfo& method_info); +ArrayRef<const uint8_t> WriteDebugElfFileForMethods( + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef<const MethodDebugInfo>& method_infos); -ArrayRef<const uint8_t> WriteDebugElfFileForClasses(const InstructionSet isa, - const ArrayRef<mirror::Class*>& types) +ArrayRef<const uint8_t> WriteDebugElfFileForClasses( + InstructionSet isa, + const InstructionSetFeatures* features, + const ArrayRef<mirror::Class*>& types) 
SHARED_REQUIRES(Locks::mutator_lock_); +std::vector<MethodDebugInfo> MakeTrampolineInfos(const OatHeader& oat_header); + } // namespace debug } // namespace art diff --git a/compiler/debug/elf_gnu_debugdata_writer.h b/compiler/debug/elf_gnu_debugdata_writer.h index 5c7d1c72a4..fb63d62572 100644 --- a/compiler/debug/elf_gnu_debugdata_writer.h +++ b/compiler/debug/elf_gnu_debugdata_writer.h @@ -79,13 +79,14 @@ static void XzCompress(const std::vector<uint8_t>* src, std::vector<uint8_t>* ds template <typename ElfTypes> static std::vector<uint8_t> MakeMiniDebugInfoInternal( InstructionSet isa, + const InstructionSetFeatures* features, size_t rodata_section_size, size_t text_section_size, const ArrayRef<const MethodDebugInfo>& method_infos) { std::vector<uint8_t> buffer; buffer.reserve(KB); VectorOutputStream out("Mini-debug-info ELF file", &buffer); - std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out)); + std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, features, &out)); builder->Start(); // Mirror .rodata and .text as NOBITS sections. // It is needed to detected relocations after compression. diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h index 41508f44b4..045edddd77 100644 --- a/compiler/debug/elf_symtab_writer.h +++ b/compiler/debug/elf_symtab_writer.h @@ -39,7 +39,7 @@ template <typename ElfTypes> static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder, const ArrayRef<const MethodDebugInfo>& method_infos, bool with_signature) { - bool generated_mapping_symbol = false; + uint64_t mapping_symbol_address = std::numeric_limits<uint64_t>::max(); auto* strtab = builder->GetStrTab(); auto* symtab = builder->GetSymTab(); @@ -47,12 +47,12 @@ static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder, return; } - // Find all addresses (low_pc) which contain deduped methods. + // Find all addresses which contain deduped methods. 
// The first instance of method is not marked deduped_, but the rest is. - std::unordered_set<uint32_t> deduped_addresses; + std::unordered_set<uint64_t> deduped_addresses; for (const MethodDebugInfo& info : method_infos) { if (info.deduped) { - deduped_addresses.insert(info.low_pc); + deduped_addresses.insert(info.code_address); } } @@ -64,40 +64,37 @@ static void WriteDebugSymbols(ElfBuilder<ElfTypes>* builder, if (info.deduped) { continue; // Add symbol only for the first instance. } - std::string name = PrettyMethod(info.dex_method_index, *info.dex_file, with_signature); - if (deduped_addresses.find(info.low_pc) != deduped_addresses.end()) { - name += " [DEDUPED]"; + size_t name_offset; + if (info.trampoline_name != nullptr) { + name_offset = strtab->Write(info.trampoline_name); + } else { + DCHECK(info.dex_file != nullptr); + std::string name = PrettyMethod(info.dex_method_index, *info.dex_file, with_signature); + if (deduped_addresses.find(info.code_address) != deduped_addresses.end()) { + name += " [DEDUPED]"; + } + // If we write method names without signature, we might see the same name multiple times. + name_offset = (name == last_name ? last_name_offset : strtab->Write(name)); + last_name = std::move(name); + last_name_offset = name_offset; } - // If we write method names without signature, we might see the same name multiple times. - size_t name_offset = (name == last_name ? last_name_offset : strtab->Write(name)); - const auto* text = builder->GetText()->Exists() ? builder->GetText() : nullptr; - const bool is_relative = (text != nullptr); - uint32_t low_pc = info.low_pc; + const auto* text = info.is_code_address_text_relative ? builder->GetText() : nullptr; + uint64_t address = info.code_address + (text != nullptr ? text->GetAddress() : 0); // Add in code delta, e.g., thumb bit 0 for Thumb2 code. 
- low_pc += info.compiled_method->CodeDelta(); - symtab->Add(name_offset, - text, - low_pc, - is_relative, - info.high_pc - info.low_pc, - STB_GLOBAL, - STT_FUNC); + address += CompiledMethod::CodeDelta(info.isa); + symtab->Add(name_offset, text, address, info.code_size, STB_GLOBAL, STT_FUNC); // Conforming to aaelf, add $t mapping symbol to indicate start of a sequence of thumb2 // instructions, so that disassembler tools can correctly disassemble. // Note that even if we generate just a single mapping symbol, ARM's Streamline // requires it to match function symbol. Just address 0 does not work. - if (info.compiled_method->GetInstructionSet() == kThumb2) { - if (!generated_mapping_symbol || !kGenerateSingleArmMappingSymbol) { - symtab->Add(strtab->Write("$t"), text, info.low_pc & ~1, - is_relative, 0, STB_LOCAL, STT_NOTYPE); - generated_mapping_symbol = true; + if (info.isa == kThumb2) { + if (address < mapping_symbol_address || !kGenerateSingleArmMappingSymbol) { + symtab->Add(strtab->Write("$t"), text, address & ~1, 0, STB_LOCAL, STT_NOTYPE); + mapping_symbol_address = address; } } - - last_name = std::move(name); - last_name_offset = name_offset; } strtab->End(); diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h index 6b3dd8c528..ed1da2c26e 100644 --- a/compiler/debug/method_debug_info.h +++ b/compiler/debug/method_debug_info.h @@ -24,22 +24,22 @@ namespace art { namespace debug { struct MethodDebugInfo { - const DexFile* dex_file; + const char* trampoline_name; + const DexFile* dex_file; // Native methods (trampolines) do not reference dex file. 
size_t class_def_index; uint32_t dex_method_index; uint32_t access_flags; const DexFile::CodeItem* code_item; + InstructionSet isa; bool deduped; - uintptr_t low_pc; - uintptr_t high_pc; - CompiledMethod* compiled_method; - - bool IsFromOptimizingCompiler() const { - return compiled_method->GetQuickCode().size() > 0 && - compiled_method->GetVmapTable().size() > 0 && - compiled_method->GetGcMap().size() == 0 && - code_item != nullptr; - } + bool is_native_debuggable; + bool is_optimized; + bool is_code_address_text_relative; // Is the address offset from start of .text section? + uint64_t code_address; + uint32_t code_size; + uint32_t frame_size_in_bytes; + const void* code_info; + ArrayRef<const uint8_t> cfi; }; } // namespace debug diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc index ad4ddadd2f..48c4356cfd 100644 --- a/compiler/dex/quick/dex_file_method_inliner.cc +++ b/compiler/dex/quick/dex_file_method_inliner.cc @@ -101,6 +101,14 @@ static constexpr bool kIntrinsicIsStatic[] = { false, // kIntrinsicCas false, // kIntrinsicUnsafeGet false, // kIntrinsicUnsafePut + false, // kIntrinsicUnsafeGetAndAddInt, + false, // kIntrinsicUnsafeGetAndAddLong, + false, // kIntrinsicUnsafeGetAndSetInt, + false, // kIntrinsicUnsafeGetAndSetLong, + false, // kIntrinsicUnsafeGetAndSetObject, + false, // kIntrinsicUnsafeLoadFence, + false, // kIntrinsicUnsafeStoreFence, + false, // kIntrinsicUnsafeFullFence, true, // kIntrinsicSystemArrayCopyCharArray true, // kIntrinsicSystemArrayCopy }; @@ -177,6 +185,14 @@ static_assert(kIntrinsicIsStatic[kIntrinsicPoke], "Poke must be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicCas], "Cas must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], "UnsafeGet must not be static"); static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafePut], "UnsafePut must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndAddInt], "UnsafeGetAndAddInt must 
not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndAddLong], "UnsafeGetAndAddLong must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndSetInt], "UnsafeGetAndSetInt must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndSetLong], "UnsafeGetAndSetLong must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGetAndSetObject], "UnsafeGetAndSetObject must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeLoadFence], "UnsafeLoadFence must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeStoreFence], "UnsafeStoreFence must not be static"); +static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeFullFence], "UnsafeFullFence must not be static"); static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray], "SystemArrayCopyCharArray must be static"); static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopy], @@ -318,6 +334,14 @@ const char* const DexFileMethodInliner::kNameCacheNames[] = { "putObject", // kNameCachePutObject "putObjectVolatile", // kNameCachePutObjectVolatile "putOrderedObject", // kNameCachePutOrderedObject + "getAndAddInt", // kNameCacheGetAndAddInt, + "getAndAddLong", // kNameCacheGetAndAddLong, + "getAndSetInt", // kNameCacheGetAndSetInt, + "getAndSetLong", // kNameCacheGetAndSetLong, + "getAndSetObject", // kNameCacheGetAndSetObject, + "loadFence", // kNameCacheLoadFence, + "storeFence", // kNameCacheStoreFence, + "fullFence", // kNameCacheFullFence, "arraycopy", // kNameCacheArrayCopy "bitCount", // kNameCacheBitCount "compare", // kNameCacheCompare @@ -404,10 +428,14 @@ const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = { kClassCacheJavaLangObject, kClassCacheJavaLangObject } }, // kProtoCacheObjectJ_I { kClassCacheInt, 2, { kClassCacheJavaLangObject, kClassCacheLong } }, + // kProtoCacheObjectJI_I + { kClassCacheInt, 3, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheInt } }, // 
kProtoCacheObjectJI_V { kClassCacheVoid, 3, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheInt } }, // kProtoCacheObjectJ_J { kClassCacheLong, 2, { kClassCacheJavaLangObject, kClassCacheLong } }, + // kProtoCacheObjectJJ_J + { kClassCacheLong, 3, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheLong } }, // kProtoCacheObjectJJ_V { kClassCacheVoid, 3, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheLong } }, // kProtoCacheObjectJ_Object @@ -415,6 +443,9 @@ const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = { // kProtoCacheObjectJObject_V { kClassCacheVoid, 3, { kClassCacheJavaLangObject, kClassCacheLong, kClassCacheJavaLangObject } }, + // kProtoCacheObjectJObject_Object + { kClassCacheJavaLangObject, 3, { kClassCacheJavaLangObject, kClassCacheLong, + kClassCacheJavaLangObject } }, // kProtoCacheCharArrayICharArrayII_V { kClassCacheVoid, 5, {kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt} }, @@ -574,6 +605,13 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods INTRINSIC(JavaLangString, IndexOf, I_I, kIntrinsicIndexOf, kIntrinsicFlagBase0), INTRINSIC(JavaLangString, Length, _I, kIntrinsicIsEmptyOrLength, kIntrinsicFlagLength), + INTRINSIC(JavaLangStringFactory, NewStringFromBytes, ByteArrayIII_String, + kIntrinsicNewStringFromBytes, kIntrinsicFlagNone), + INTRINSIC(JavaLangStringFactory, NewStringFromChars, IICharArray_String, + kIntrinsicNewStringFromChars, kIntrinsicFlagNone), + INTRINSIC(JavaLangStringFactory, NewStringFromString, String_String, + kIntrinsicNewStringFromString, kIntrinsicFlagNone), + INTRINSIC(JavaLangThread, CurrentThread, _Thread, kIntrinsicCurrentThread, 0), INTRINSIC(LibcoreIoMemory, PeekByte, J_B, kIntrinsicPeek, kSignedByte), @@ -609,6 +647,16 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods UNSAFE_GET_PUT(Object, Object, kIntrinsicFlagIsObject), #undef 
UNSAFE_GET_PUT + // 1.8 + INTRINSIC(SunMiscUnsafe, GetAndAddInt, ObjectJI_I, kIntrinsicUnsafeGetAndAddInt, 0), + INTRINSIC(SunMiscUnsafe, GetAndAddLong, ObjectJJ_J, kIntrinsicUnsafeGetAndAddLong, 0), + INTRINSIC(SunMiscUnsafe, GetAndSetInt, ObjectJI_I, kIntrinsicUnsafeGetAndSetInt, 0), + INTRINSIC(SunMiscUnsafe, GetAndSetLong, ObjectJJ_J, kIntrinsicUnsafeGetAndSetLong, 0), + INTRINSIC(SunMiscUnsafe, GetAndSetObject, ObjectJObject_Object, kIntrinsicUnsafeGetAndSetObject, 0), + INTRINSIC(SunMiscUnsafe, LoadFence, _V, kIntrinsicUnsafeLoadFence, 0), + INTRINSIC(SunMiscUnsafe, StoreFence, _V, kIntrinsicUnsafeStoreFence, 0), + INTRINSIC(SunMiscUnsafe, FullFence, _V, kIntrinsicUnsafeFullFence, 0), + INTRINSIC(JavaLangSystem, ArrayCopy, CharArrayICharArrayII_V , kIntrinsicSystemArrayCopyCharArray, 0), INTRINSIC(JavaLangSystem, ArrayCopy, ObjectIObjectII_V , kIntrinsicSystemArrayCopy, @@ -815,6 +863,14 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) { case kIntrinsicRotateRight: case kIntrinsicRotateLeft: case kIntrinsicSignum: + case kIntrinsicUnsafeGetAndAddInt: + case kIntrinsicUnsafeGetAndAddLong: + case kIntrinsicUnsafeGetAndSetInt: + case kIntrinsicUnsafeGetAndSetLong: + case kIntrinsicUnsafeGetAndSetObject: + case kIntrinsicUnsafeLoadFence: + case kIntrinsicUnsafeStoreFence: + case kIntrinsicUnsafeFullFence: case kIntrinsicSystemArrayCopy: return false; // not implemented in quick. 
default: diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h index b465db2c54..34b56cd494 100644 --- a/compiler/dex/quick/dex_file_method_inliner.h +++ b/compiler/dex/quick/dex_file_method_inliner.h @@ -227,6 +227,14 @@ class DexFileMethodInliner { kNameCachePutObject, kNameCachePutObjectVolatile, kNameCachePutOrderedObject, + kNameCacheGetAndAddInt, + kNameCacheGetAndAddLong, + kNameCacheGetAndSetInt, + kNameCacheGetAndSetLong, + kNameCacheGetAndSetObject, + kNameCacheLoadFence, + kNameCacheStoreFence, + kNameCacheFullFence, kNameCacheArrayCopy, kNameCacheBitCount, kNameCacheCompare, @@ -282,11 +290,14 @@ class DexFileMethodInliner { kProtoCacheObjectJJJ_Z, kProtoCacheObjectJObjectObject_Z, kProtoCacheObjectJ_I, + kProtoCacheObjectJI_I, kProtoCacheObjectJI_V, kProtoCacheObjectJ_J, + kProtoCacheObjectJJ_J, kProtoCacheObjectJJ_V, kProtoCacheObjectJ_Object, kProtoCacheObjectJObject_V, + kProtoCacheObjectJObject_Object, kProtoCacheCharArrayICharArrayII_V, kProtoCacheObjectIObjectII_V, kProtoCacheIICharArrayI_V, diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 3100b6da0f..5d8e3baacb 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -16,9 +16,6 @@ #include "compiler_driver.h" -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include <utils/Trace.h> - #include <unordered_set> #include <vector> #include <unistd.h> @@ -30,6 +27,7 @@ #include "art_field-inl.h" #include "art_method-inl.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "base/timing_logger.h" #include "class_linker-inl.h" diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h index a220959288..4db82a638d 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -159,10 +159,16 @@ class CompilerOptions FINAL { size_t GetInlineDepthLimit() const { return inline_depth_limit_; } + 
void SetInlineDepthLimit(size_t limit) { + inline_depth_limit_ = limit; + } size_t GetInlineMaxCodeUnits() const { return inline_max_code_units_; } + void SetInlineMaxCodeUnits(size_t units) { + inline_max_code_units_ = units; + } double GetTopKProfileThreshold() const { return top_k_profile_threshold_; diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h index f7da609e5d..ef44a6fe1c 100644 --- a/compiler/elf_builder.h +++ b/compiler/elf_builder.h @@ -20,6 +20,7 @@ #include <vector> #include "arch/instruction_set.h" +#include "arch/mips/instruction_set_features_mips.h" #include "base/bit_utils.h" #include "base/casts.h" #include "base/unix_file/fd_file.h" @@ -38,6 +39,7 @@ namespace art { // .rodata - DEX files and oat metadata. // .text - Compiled code. // .bss - Zero-initialized writeable section. +// .MIPS.abiflags - MIPS specific section. // .dynstr - Names for .dynsym. // .dynsym - A few oat-specific dynamic symbols. // .hash - Hash-table for .dynsym. @@ -163,12 +165,6 @@ class ElfBuilder FINAL { } } - // Returns true if the section was written to disk. - // (Used to check whether we have .text when writing JIT debug info) - bool Exists() const { - return finished_; - } - // Get the location of this section in virtual memory. Elf_Addr GetAddress() const { CHECK(started_); @@ -362,16 +358,18 @@ class ElfBuilder FINAL { void Add(Elf_Word name, const Section* section, Elf_Addr addr, - bool is_relative, Elf_Word size, uint8_t binding, - uint8_t type, - uint8_t other = 0) { - DCHECK(section != nullptr || !is_relative); - Elf_Addr abs_addr = addr + (is_relative ? section->GetAddress() : 0); - Elf_Word section_index = - (section != nullptr) ? 
section->GetSectionIndex() : static_cast<Elf_Word>(SHN_ABS); - Add(name, section_index, abs_addr, size, binding, type, other); + uint8_t type) { + Elf_Word section_index; + if (section != nullptr) { + DCHECK_LE(section->GetAddress(), addr); + DCHECK_LE(addr, section->GetAddress() + section->GetSize()); + section_index = section->GetSectionIndex(); + } else { + section_index = static_cast<Elf_Word>(SHN_ABS); + } + Add(name, section_index, addr, size, binding, type); } void Add(Elf_Word name, @@ -379,21 +377,90 @@ class ElfBuilder FINAL { Elf_Addr addr, Elf_Word size, uint8_t binding, - uint8_t type, - uint8_t other = 0) { + uint8_t type) { Elf_Sym sym = Elf_Sym(); sym.st_name = name; sym.st_value = addr; sym.st_size = size; - sym.st_other = other; + sym.st_other = 0; sym.st_shndx = section_index; sym.st_info = (binding << 4) + (type & 0xf); CachedSection::Add(&sym, sizeof(sym)); } }; - ElfBuilder(InstructionSet isa, OutputStream* output) + class AbiflagsSection FINAL : public Section { + public: + // Section with Mips abiflag info. 
+ static constexpr uint8_t MIPS_AFL_REG_NONE = 0; // no registers + static constexpr uint8_t MIPS_AFL_REG_32 = 1; // 32-bit registers + static constexpr uint8_t MIPS_AFL_REG_64 = 2; // 64-bit registers + static constexpr uint32_t MIPS_AFL_FLAGS1_ODDSPREG = 1; // Uses odd single-prec fp regs + static constexpr uint8_t MIPS_ABI_FP_DOUBLE = 1; // -mdouble-float + static constexpr uint8_t MIPS_ABI_FP_XX = 5; // -mfpxx + static constexpr uint8_t MIPS_ABI_FP_64A = 7; // -mips32r* -mfp64 -mno-odd-spreg + + AbiflagsSection(ElfBuilder<ElfTypes>* owner, + const std::string& name, + Elf_Word type, + Elf_Word flags, + const Section* link, + Elf_Word info, + Elf_Word align, + Elf_Word entsize, + InstructionSet isa, + const InstructionSetFeatures* features) + : Section(owner, name, type, flags, link, info, align, entsize) { + if (isa == kMips || isa == kMips64) { + bool fpu32 = false; // assume mips64 values + uint8_t isa_rev = 6; // assume mips64 values + if (isa == kMips) { + // adjust for mips32 values + fpu32 = features->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint(); + isa_rev = features->AsMipsInstructionSetFeatures()->IsR6() + ? 6 + : features->AsMipsInstructionSetFeatures()->IsMipsIsaRevGreaterThanEqual2() + ? (fpu32 ? 2 : 5) + : 1; + } + abiflags_.version = 0; // version of flags structure + abiflags_.isa_level = (isa == kMips) ? 32 : 64; + abiflags_.isa_rev = isa_rev; + abiflags_.gpr_size = (isa == kMips) ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64; + abiflags_.cpr1_size = fpu32 ? MIPS_AFL_REG_32 : MIPS_AFL_REG_64; + abiflags_.cpr2_size = MIPS_AFL_REG_NONE; + // Set the fp_abi to MIPS_ABI_FP_64A for mips32 with 64-bit FPUs (ie: mips32 R5 and R6). + // Otherwise set to MIPS_ABI_FP_DOUBLE. + abiflags_.fp_abi = (isa == kMips && !fpu32) ? MIPS_ABI_FP_64A : MIPS_ABI_FP_DOUBLE; + abiflags_.isa_ext = 0; + abiflags_.ases = 0; + // To keep the code simple, we are not using odd FP reg for single floats for both + // mips32 and mips64 ART. 
Therefore we are not setting the MIPS_AFL_FLAGS1_ODDSPREG bit. + abiflags_.flags1 = 0; + abiflags_.flags2 = 0; + } + } + + Elf_Word GetSize() const { + return sizeof(abiflags_); + } + + void Write() { + this->WriteFully(&abiflags_, sizeof(abiflags_)); + } + + private: + struct { + uint16_t version; // version of this structure + uint8_t isa_level, isa_rev, gpr_size, cpr1_size, cpr2_size; + uint8_t fp_abi; + uint32_t isa_ext, ases, flags1, flags2; + } abiflags_; + }; + + ElfBuilder(InstructionSet isa, const InstructionSetFeatures* features, OutputStream* output) : isa_(isa), + features_(features), stream_(output), rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0), text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0), @@ -410,6 +477,8 @@ class ElfBuilder FINAL { debug_info_(this, ".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0), debug_line_(this, ".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0), shstrtab_(this, ".shstrtab", 0, 1), + abiflags_(this, ".MIPS.abiflags", SHT_MIPS_ABIFLAGS, SHF_ALLOC, nullptr, 0, kPageSize, 0, + isa, features), started_(false), write_program_headers_(false), loaded_size_(0u), @@ -419,6 +488,7 @@ class ElfBuilder FINAL { dynamic_.phdr_flags_ = PF_R | PF_W; dynamic_.phdr_type_ = PT_DYNAMIC; eh_frame_hdr_.phdr_type_ = PT_GNU_EH_FRAME; + abiflags_.phdr_type_ = PT_MIPS_ABIFLAGS; } ~ElfBuilder() {} @@ -520,7 +590,7 @@ class ElfBuilder FINAL { stream_.Flush(); // The main ELF header. 
- Elf_Ehdr elf_header = MakeElfHeader(isa_); + Elf_Ehdr elf_header = MakeElfHeader(isa_, features_); elf_header.e_shoff = section_headers_offset; elf_header.e_shnum = shdrs.size(); elf_header.e_shstrndx = shstrtab_.GetSectionIndex(); @@ -564,7 +634,12 @@ class ElfBuilder FINAL { Elf_Word rodata_address = rodata_.GetAddress(); Elf_Word text_address = RoundUp(rodata_address + rodata_size, kPageSize); Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize); - Elf_Word dynstr_address = RoundUp(bss_address + bss_size, kPageSize); + Elf_Word abiflags_address = RoundUp(bss_address + bss_size, kPageSize); + Elf_Word abiflags_size = 0; + if (isa_ == kMips || isa_ == kMips64) { + abiflags_size = abiflags_.GetSize(); + } + Elf_Word dynstr_address = RoundUp(abiflags_address + abiflags_size, kPageSize); // Cache .dynstr, .dynsym and .hash data. dynstr_.Add(""); // dynstr should start with empty string. @@ -649,6 +724,12 @@ class ElfBuilder FINAL { return loaded_size_; } + void WriteMIPSabiflagsSection() { + abiflags_.Start(); + abiflags_.Write(); + abiflags_.End(); + } + // Returns true if all writes and seeks on the output stream succeeded. bool Good() { return stream_.Good(); @@ -668,7 +749,7 @@ class ElfBuilder FINAL { } private: - static Elf_Ehdr MakeElfHeader(InstructionSet isa) { + static Elf_Ehdr MakeElfHeader(InstructionSet isa, const InstructionSetFeatures* features) { Elf_Ehdr elf_header = Elf_Ehdr(); switch (isa) { case kArm: @@ -696,18 +777,20 @@ class ElfBuilder FINAL { case kMips: { elf_header.e_machine = EM_MIPS; elf_header.e_flags = (EF_MIPS_NOREORDER | - EF_MIPS_PIC | - EF_MIPS_CPIC | - EF_MIPS_ABI_O32 | - EF_MIPS_ARCH_32R2); + EF_MIPS_PIC | + EF_MIPS_CPIC | + EF_MIPS_ABI_O32 | + features->AsMipsInstructionSetFeatures()->IsR6() + ? 
EF_MIPS_ARCH_32R6 + : EF_MIPS_ARCH_32R2); break; } case kMips64: { elf_header.e_machine = EM_MIPS; elf_header.e_flags = (EF_MIPS_NOREORDER | - EF_MIPS_PIC | - EF_MIPS_CPIC | - EF_MIPS_ARCH_64R6); + EF_MIPS_PIC | + EF_MIPS_CPIC | + EF_MIPS_ARCH_64R6); break; } case kNone: { @@ -818,6 +901,7 @@ class ElfBuilder FINAL { } InstructionSet isa_; + const InstructionSetFeatures* features_; ErrorDelayingOutputStream stream_; @@ -836,6 +920,7 @@ class ElfBuilder FINAL { Section debug_info_; Section debug_line_; StringSection shstrtab_; + AbiflagsSection abiflags_; std::vector<std::unique_ptr<Section>> other_sections_; // List of used section in the order in which they were written. diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc index 19346ecc2b..bed864b534 100644 --- a/compiler/elf_writer_quick.cc +++ b/compiler/elf_writer_quick.cc @@ -51,10 +51,12 @@ constexpr dwarf::CFIFormat kCFIFormat = dwarf::DW_DEBUG_FRAME_FORMAT; class DebugInfoTask : public Task { public: DebugInfoTask(InstructionSet isa, + const InstructionSetFeatures* features, size_t rodata_section_size, size_t text_section_size, const ArrayRef<const debug::MethodDebugInfo>& method_infos) : isa_(isa), + instruction_set_features_(features), rodata_section_size_(rodata_section_size), text_section_size_(text_section_size), method_infos_(method_infos) { @@ -62,6 +64,7 @@ class DebugInfoTask : public Task { void Run(Thread*) { result_ = debug::MakeMiniDebugInfo(isa_, + instruction_set_features_, rodata_section_size_, text_section_size_, method_infos_); @@ -73,6 +76,7 @@ class DebugInfoTask : public Task { private: InstructionSet isa_; + const InstructionSetFeatures* instruction_set_features_; size_t rodata_section_size_; size_t text_section_size_; const ArrayRef<const debug::MethodDebugInfo>& method_infos_; @@ -83,6 +87,7 @@ template <typename ElfTypes> class ElfWriterQuick FINAL : public ElfWriter { public: ElfWriterQuick(InstructionSet instruction_set, + const InstructionSetFeatures* 
features, const CompilerOptions* compiler_options, File* elf_file); ~ElfWriterQuick(); @@ -107,6 +112,7 @@ class ElfWriterQuick FINAL : public ElfWriter { std::vector<uint8_t>* buffer); private: + const InstructionSetFeatures* instruction_set_features_; const CompilerOptions* const compiler_options_; File* const elf_file_; size_t rodata_size_; @@ -121,27 +127,36 @@ class ElfWriterQuick FINAL : public ElfWriter { }; std::unique_ptr<ElfWriter> CreateElfWriterQuick(InstructionSet instruction_set, + const InstructionSetFeatures* features, const CompilerOptions* compiler_options, File* elf_file) { if (Is64BitInstructionSet(instruction_set)) { - return MakeUnique<ElfWriterQuick<ElfTypes64>>(instruction_set, compiler_options, elf_file); + return MakeUnique<ElfWriterQuick<ElfTypes64>>(instruction_set, + features, + compiler_options, + elf_file); } else { - return MakeUnique<ElfWriterQuick<ElfTypes32>>(instruction_set, compiler_options, elf_file); + return MakeUnique<ElfWriterQuick<ElfTypes32>>(instruction_set, + features, + compiler_options, + elf_file); } } template <typename ElfTypes> ElfWriterQuick<ElfTypes>::ElfWriterQuick(InstructionSet instruction_set, + const InstructionSetFeatures* features, const CompilerOptions* compiler_options, File* elf_file) : ElfWriter(), + instruction_set_features_(features), compiler_options_(compiler_options), elf_file_(elf_file), rodata_size_(0u), text_size_(0u), bss_size_(0u), output_stream_(MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(elf_file))), - builder_(new ElfBuilder<ElfTypes>(instruction_set, output_stream_.get())) {} + builder_(new ElfBuilder<ElfTypes>(instruction_set, features, output_stream_.get())) {} template <typename ElfTypes> ElfWriterQuick<ElfTypes>::~ElfWriterQuick() {} @@ -195,6 +210,9 @@ void ElfWriterQuick<ElfTypes>::WriteDynamicSection() { if (bss_size_ != 0u) { builder_->GetBss()->WriteNoBitsSection(bss_size_); } + if (builder_->GetIsa() == kMips || builder_->GetIsa() == kMips64) { + 
builder_->WriteMIPSabiflagsSection(); + } builder_->WriteDynamicSection(); } @@ -205,7 +223,11 @@ void ElfWriterQuick<ElfTypes>::PrepareDebugInfo( // Prepare the mini-debug-info in background while we do other I/O. Thread* self = Thread::Current(); debug_info_task_ = std::unique_ptr<DebugInfoTask>( - new DebugInfoTask(builder_->GetIsa(), rodata_size_, text_size_, method_infos)); + new DebugInfoTask(builder_->GetIsa(), + instruction_set_features_, + rodata_size_, + text_size_, + method_infos)); debug_info_thread_pool_ = std::unique_ptr<ThreadPool>( new ThreadPool("Mini-debug-info writer", 1)); debug_info_thread_pool_->AddTask(self, debug_info_task_.get()); diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h index 347d372fe2..3d5dd39a66 100644 --- a/compiler/elf_writer_quick.h +++ b/compiler/elf_writer_quick.h @@ -26,8 +26,10 @@ namespace art { class CompilerOptions; +class InstructionSetFeatures; std::unique_ptr<ElfWriter> CreateElfWriterQuick(InstructionSet instruction_set, + const InstructionSetFeatures* features, const CompilerOptions* compiler_options, File* elf_file); diff --git a/compiler/image_test.cc b/compiler/image_test.cc index 5763cec43f..7779e44519 100644 --- a/compiler/image_test.cc +++ b/compiler/image_test.cc @@ -24,6 +24,7 @@ #include "class_linker-inl.h" #include "common_compiler_test.h" #include "debug/method_debug_info.h" +#include "driver/compiler_options.h" #include "elf_writer.h" #include "elf_writer_quick.h" #include "gc/space/image_space.h" @@ -48,8 +49,12 @@ class ImageTest : public CommonCompilerTest { }; void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) { - // TODO: Test does not currently work with optimizing. - CreateCompilerDriver(Compiler::kQuick, kRuntimeISA); + CreateCompilerDriver(Compiler::kOptimizing, kRuntimeISA, kIsTargetBuild ? 2U : 16U); + + // Set inline filter values. 
+ compiler_options_->SetInlineDepthLimit(CompilerOptions::kDefaultInlineDepthLimit); + compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits); + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); // Enable write for dex2dex. for (const DexFile* dex_file : class_linker->GetBootClassPath()) { @@ -99,6 +104,7 @@ void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) { const std::vector<const DexFile*>& dex_files = class_linker->GetBootClassPath(); std::unique_ptr<ElfWriter> elf_writer = CreateElfWriterQuick( compiler_driver_->GetInstructionSet(), + compiler_driver_->GetInstructionSetFeatures(), &compiler_driver_->GetCompilerOptions(), oat_file.GetFile()); elf_writer->Start(); @@ -282,14 +288,17 @@ void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) { } TEST_F(ImageTest, WriteReadUncompressed) { + TEST_DISABLED_FOR_READ_BARRIER(); // b/27578460 TestWriteRead(ImageHeader::kStorageModeUncompressed); } TEST_F(ImageTest, WriteReadLZ4) { + TEST_DISABLED_FOR_READ_BARRIER(); // b/27578460 TestWriteRead(ImageHeader::kStorageModeLZ4); } TEST_F(ImageTest, WriteReadLZ4HC) { + TEST_DISABLED_FOR_READ_BARRIER(); // b/27578460 TestWriteRead(ImageHeader::kStorageModeLZ4HC); } diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 871435b85f..b1b971f6ba 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -266,17 +266,9 @@ bool ImageWriter::Write(int image_fd, << PrettyDuration(NanoTime() - compress_start_time); } - // Write header first, as uncompressed. - image_header->data_size_ = data_size; - if (!image_file->WriteFully(image_info.image_->Begin(), sizeof(ImageHeader))) { - PLOG(ERROR) << "Failed to write image file header " << image_filename; - image_file->Erase(); - return false; - } - // Write out the image + fields + methods. 
const bool is_compressed = compressed_data != nullptr; - if (!image_file->WriteFully(image_data_to_write, data_size)) { + if (!image_file->PwriteFully(image_data_to_write, data_size, sizeof(ImageHeader))) { PLOG(ERROR) << "Failed to write image file data " << image_filename; image_file->Erase(); return false; @@ -291,13 +283,33 @@ bool ImageWriter::Write(int image_fd, if (!is_compressed) { CHECK_EQ(bitmap_position_in_file, bitmap_section.Offset()); } - if (!image_file->Write(reinterpret_cast<char*>(image_info.image_bitmap_->Begin()), - bitmap_section.Size(), - bitmap_position_in_file)) { + if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_bitmap_->Begin()), + bitmap_section.Size(), + bitmap_position_in_file)) { PLOG(ERROR) << "Failed to write image file " << image_filename; image_file->Erase(); return false; } + + int err = image_file->Flush(); + if (err < 0) { + PLOG(ERROR) << "Failed to flush image file " << image_filename << " with result " << err; + image_file->Erase(); + return false; + } + + // Write header last in case the compiler gets killed in the middle of image writing. + // We do not want to have a corrupted image with a valid header. + // The header is uncompressed since it contains whether the image is compressed or not. 
+ image_header->data_size_ = data_size; + if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_->Begin()), + sizeof(ImageHeader), + 0)) { + PLOG(ERROR) << "Failed to write image file header " << image_filename; + image_file->Erase(); + return false; + } + CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(), static_cast<size_t>(image_file->GetLength())); if (image_file->FlushCloseOrErase() != 0) { diff --git a/compiler/image_writer.h b/compiler/image_writer.h index dba9dd71fc..f204b28380 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -443,7 +443,7 @@ class ImageWriter FINAL { static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type); - uintptr_t NativeOffsetInImage(void* obj); + uintptr_t NativeOffsetInImage(void* obj) SHARED_REQUIRES(Locks::mutator_lock_); // Location of where the object will be when the image is loaded at runtime. template <typename T> diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index 79a6d38fc6..cda2e274ce 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -69,7 +69,8 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou DCHECK(jit_compiler != nullptr); if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) { const ArrayRef<mirror::Class*> types_array(types, count); - ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForClasses(kRuntimeISA, types_array); + ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForClasses( + kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array); CreateJITCodeEntry(std::unique_ptr<const uint8_t[]>(elf_file.data()), elf_file.size()); } } @@ -85,7 +86,7 @@ NO_RETURN static void Usage(const char* fmt, ...) 
{ exit(EXIT_FAILURE); } -JitCompiler::JitCompiler() : total_time_(0) { +JitCompiler::JitCompiler() { compiler_options_.reset(new CompilerOptions( CompilerOptions::kDefaultCompilerFilter, CompilerOptions::kDefaultHugeMethodThreshold, @@ -168,13 +169,14 @@ JitCompiler::JitCompiler() : total_time_(0) { compiler_driver_->SetDedupeEnabled(false); compiler_driver_->SetSupportBootImageFixup(false); + size_t thread_count = compiler_driver_->GetThreadCount(); if (compiler_options_->GetGenerateDebugInfo()) { #ifdef __ANDROID__ const char* prefix = "/data/misc/trace"; #else const char* prefix = "/tmp"; #endif - DCHECK_EQ(compiler_driver_->GetThreadCount(), 1u) + DCHECK_EQ(thread_count, 1u) << "Generating debug info only works with one compiler thread"; std::string perf_filename = std::string(prefix) + "/perf-" + std::to_string(getpid()) + ".map"; perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str())); @@ -183,6 +185,10 @@ JitCompiler::JitCompiler() : total_time_(0) { " Are you on a user build? 
Perf only works on userdebug/eng builds"; } } + + size_t inline_depth_limit = compiler_driver_->GetCompilerOptions().GetInlineDepthLimit(); + DCHECK_LT(thread_count * inline_depth_limit, std::numeric_limits<uint16_t>::max()) + << "ProfilingInfo's inline counter can potentially overflow"; } JitCompiler::~JitCompiler() { @@ -195,7 +201,6 @@ JitCompiler::~JitCompiler() { bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) { DCHECK(!method->IsProxyMethod()); TimingLogger logger("JIT compiler timing logger", true, VLOG_IS_ON(jit)); - const uint64_t start_time = NanoTime(); StackHandleScope<2> hs(self); self->AssertNoPendingException(); Runtime* runtime = Runtime::Current(); @@ -236,7 +241,6 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) { runtime->GetJitArenaPool()->TrimMaps(); } - total_time_ += NanoTime() - start_time; runtime->GetJit()->AddTimingLogger(logger); return success; } diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h index 5294d0ee35..533dccf216 100644 --- a/compiler/jit/jit_compiler.h +++ b/compiler/jit/jit_compiler.h @@ -18,13 +18,10 @@ #define ART_COMPILER_JIT_JIT_COMPILER_H_ #include "base/mutex.h" -#include "compiler_callbacks.h" #include "compiled_method.h" -#include "dex/verification_results.h" #include "dex/quick/dex_file_to_method_inliner_map.h" #include "driver/compiler_driver.h" #include "driver/compiler_options.h" -#include "oat_file.h" namespace art { @@ -37,23 +34,22 @@ class JitCompiler { public: static JitCompiler* Create(); virtual ~JitCompiler(); + + // Compilation entrypoint. Returns whether the compilation succeeded. 
bool CompileMethod(Thread* self, ArtMethod* method, bool osr) SHARED_REQUIRES(Locks::mutator_lock_); - CompilerCallbacks* GetCompilerCallbacks() const; - size_t GetTotalCompileTime() const { - return total_time_; - } + CompilerOptions* GetCompilerOptions() const { return compiler_options_.get(); } + CompilerDriver* GetCompilerDriver() const { + return compiler_driver_.get(); + } private: - uint64_t total_time_; std::unique_ptr<CompilerOptions> compiler_options_; std::unique_ptr<CumulativeLogger> cumulative_logger_; - std::unique_ptr<VerificationResults> verification_results_; std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_; - std::unique_ptr<CompilerCallbacks> callbacks_; std::unique_ptr<CompilerDriver> compiler_driver_; std::unique_ptr<const InstructionSetFeatures> instruction_set_features_; std::unique_ptr<File> perf_file_; @@ -62,8 +58,7 @@ class JitCompiler { // This is in the compiler since the runtime doesn't have access to the compiled method // structures. - bool AddToCodeCache(ArtMethod* method, - const CompiledMethod* compiled_method) + bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method) SHARED_REQUIRES(Locks::mutator_lock_); DISALLOW_COPY_AND_ASSIGN(JitCompiler); diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index 8d60be20ee..cf836a9c9f 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -220,8 +220,7 @@ void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() { std::string reason; ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> - LoadNativeLibrary(env_, "", class_loader_, /* is_shared_namespace */ false, - nullptr, nullptr, &reason)) + LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason)) << reason; jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24); @@ -236,8 +235,7 @@ void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() { std::string reason; ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> - 
LoadNativeLibrary(env_, "", class_loader_, /* is_shared_namespace */ false, - nullptr, nullptr, &reason)) + LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason)) << reason; jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42); diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 14fd1054c3..d22044aca3 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -176,6 +176,7 @@ class OatTest : public CommonCompilerTest { bool verify) { std::unique_ptr<ElfWriter> elf_writer = CreateElfWriterQuick( compiler_driver_->GetInstructionSet(), + compiler_driver_->GetInstructionSetFeatures(), &compiler_driver_->GetCompilerOptions(), file); elf_writer->Start(); diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index c60b02a227..c2f19c9d61 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -806,20 +806,29 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { } } - if (writer_->compiler_driver_->GetCompilerOptions().GenerateAnyDebugInfo()) { + const CompilerOptions& compiler_options = writer_->compiler_driver_->GetCompilerOptions(); + // Exclude quickened dex methods (code_size == 0) since they have no native code. + if (compiler_options.GenerateAnyDebugInfo() && code_size != 0) { + bool has_code_info = method_header->IsOptimized(); // Record debug information for this function if we are doing that. 
- const uint32_t quick_code_start = quick_code_offset - - writer_->oat_header_->GetExecutableOffset() - thumb_offset; - writer_->method_info_.push_back(debug::MethodDebugInfo { - dex_file_, - class_def_index_, - it.GetMemberIndex(), - it.GetMethodAccessFlags(), - it.GetMethodCodeItem(), - deduped, - quick_code_start, - quick_code_start + code_size, - compiled_method}); + debug::MethodDebugInfo info = debug::MethodDebugInfo(); + info.trampoline_name = nullptr; + info.dex_file = dex_file_; + info.class_def_index = class_def_index_; + info.dex_method_index = it.GetMemberIndex(); + info.access_flags = it.GetMethodAccessFlags(); + info.code_item = it.GetMethodCodeItem(); + info.isa = compiled_method->GetInstructionSet(); + info.deduped = deduped; + info.is_native_debuggable = compiler_options.GetNativeDebuggable(); + info.is_optimized = method_header->IsOptimized(); + info.is_code_address_text_relative = true; + info.code_address = code_offset - writer_->oat_header_->GetExecutableOffset(); + info.code_size = code_size; + info.frame_size_in_bytes = compiled_method->GetFrameSizeInBytes(); + info.code_info = has_code_info ? 
compiled_method->GetVmapTable().data() : nullptr; + info.cfi = compiled_method->GetCFIInfo(); + writer_->method_info_.push_back(info); } if (kIsDebugBuild) { diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h index 74aab4efd0..5e7a4a37d1 100644 --- a/compiler/oat_writer.h +++ b/compiler/oat_writer.h @@ -202,6 +202,10 @@ class OatWriter { ~OatWriter(); + void AddMethodDebugInfos(const std::vector<debug::MethodDebugInfo>& infos) { + method_info_.insert(method_info_.end(), infos.begin(), infos.end()); + } + ArrayRef<const debug::MethodDebugInfo> GetMethodDebugInfo() const { return ArrayRef<const debug::MethodDebugInfo>(method_info_); } diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 967d156cf6..af50363e31 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -543,8 +543,16 @@ void CodeGenerator::AllocateLocations(HInstruction* instruction) { DCHECK(CheckTypeConsistency(instruction)); LocationSummary* locations = instruction->GetLocations(); if (!instruction->IsSuspendCheckEntry()) { - if (locations != nullptr && locations->CanCall()) { - MarkNotLeaf(); + if (locations != nullptr) { + if (locations->CanCall()) { + MarkNotLeaf(); + } else if (locations->Intrinsified() && + instruction->IsInvokeStaticOrDirect() && + !instruction->AsInvokeStaticOrDirect()->HasCurrentMethodInput()) { + // A static method call that has been fully intrinsified, and cannot call on the slow + // path or refer to the current method directly, no longer needs current method. 
+ return; + } } if (instruction->NeedsCurrentMethod()) { SetRequiresCurrentMethod(); diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index aa9b01f30b..0b7fefafdd 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -5727,6 +5727,71 @@ void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); } + +void LocationsBuilderARM::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) { + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + DCHECK(instruction->GetResultType() == Primitive::kPrimInt + || instruction->GetResultType() == Primitive::kPrimLong); + + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); +} + +void InstructionCodeGeneratorARM::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) { + LocationSummary* locations = instruction->GetLocations(); + Location first = locations->InAt(0); + Location second = locations->InAt(1); + Location out = locations->Out(); + + if (instruction->GetResultType() == Primitive::kPrimInt) { + Register first_reg = first.AsRegister<Register>(); + ShifterOperand second_reg(second.AsRegister<Register>()); + Register out_reg = out.AsRegister<Register>(); + + switch (instruction->GetOpKind()) { + case HInstruction::kAnd: + __ bic(out_reg, first_reg, second_reg); + break; + case HInstruction::kOr: + __ orn(out_reg, first_reg, second_reg); + break; + // There is no EON on arm. 
+ case HInstruction::kXor: + default: + LOG(FATAL) << "Unexpected instruction " << instruction->DebugName(); + UNREACHABLE(); + } + return; + + } else { + DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong); + Register first_low = first.AsRegisterPairLow<Register>(); + Register first_high = first.AsRegisterPairHigh<Register>(); + ShifterOperand second_low(second.AsRegisterPairLow<Register>()); + ShifterOperand second_high(second.AsRegisterPairHigh<Register>()); + Register out_low = out.AsRegisterPairLow<Register>(); + Register out_high = out.AsRegisterPairHigh<Register>(); + + switch (instruction->GetOpKind()) { + case HInstruction::kAnd: + __ bic(out_low, first_low, second_low); + __ bic(out_high, first_high, second_high); + break; + case HInstruction::kOr: + __ orn(out_low, first_low, second_low); + __ orn(out_high, first_high, second_high); + break; + // There is no EON on arm. + case HInstruction::kXor: + default: + LOG(FATAL) << "Unexpected instruction " << instruction->DebugName(); + UNREACHABLE(); + } + } +} + void InstructionCodeGeneratorARM::GenerateAndConst(Register out, Register first, uint32_t value) { // Optimize special cases for individual halfs of `and-long` (`and` is simplified earlier). 
if (value == 0xffffffffu) { diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 985dc056f6..89b9e2c599 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -1862,7 +1862,7 @@ void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) { HandleBinaryOp(instruction); } -void LocationsBuilderARM64::VisitArm64BitwiseNegatedRight(HArm64BitwiseNegatedRight* instr) { +void LocationsBuilderARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) { DCHECK(Primitive::IsIntegralType(instr->GetType())) << instr->GetType(); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); locations->SetInAt(0, Location::RequiresRegister()); @@ -1871,8 +1871,7 @@ void LocationsBuilderARM64::VisitArm64BitwiseNegatedRight(HArm64BitwiseNegatedRi locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } -void InstructionCodeGeneratorARM64::VisitArm64BitwiseNegatedRight( - HArm64BitwiseNegatedRight* instr) { +void InstructionCodeGeneratorARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) { Register dst = OutputRegister(instr); Register lhs = InputRegisterAt(instr, 0); Register rhs = InputRegisterAt(instr, 1); diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index b9638f2027..4f1e90cd7f 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -440,13 +440,13 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor { void VisitMultiplyAccumulate(HMultiplyAccumulate* instruction) OVERRIDE { StartAttributeStream("kind") << instruction->GetOpKind(); } -#endif -#ifdef ART_ENABLE_CODEGEN_arm64 - void VisitArm64BitwiseNegatedRight(HArm64BitwiseNegatedRight* instruction) OVERRIDE { + void VisitBitwiseNegatedRight(HBitwiseNegatedRight* instruction) OVERRIDE { StartAttributeStream("kind") << instruction->GetOpKind(); } +#endif 
+#ifdef ART_ENABLE_CODEGEN_arm64 void VisitArm64DataProcWithShifterOp(HArm64DataProcWithShifterOp* instruction) OVERRIDE { StartAttributeStream("kind") << instruction->GetInstrKind() << "+" << instruction->GetOpKind(); if (HArm64DataProcWithShifterOp::IsShiftOp(instruction->GetOpKind())) { diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 3e3719e6ea..d861e39c8b 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -28,6 +28,8 @@ #include "driver/dex_compilation_unit.h" #include "instruction_simplifier.h" #include "intrinsics.h" +#include "jit/jit.h" +#include "jit/jit_code_cache.h" #include "mirror/class_loader.h" #include "mirror/dex_cache.h" #include "nodes.h" @@ -220,6 +222,33 @@ static uint32_t FindClassIndexIn(mirror::Class* cls, return index; } +class ScopedProfilingInfoInlineUse { + public: + explicit ScopedProfilingInfoInlineUse(ArtMethod* method, Thread* self) + : method_(method), + self_(self), + // Fetch the profiling info ahead of using it. If it's null when fetching, + // we should not call JitCodeCache::DoneInlining. + profiling_info_( + Runtime::Current()->GetJit()->GetCodeCache()->NotifyCompilerUse(method, self)) { + } + + ~ScopedProfilingInfoInlineUse() { + if (profiling_info_ != nullptr) { + size_t pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize(); + DCHECK_EQ(profiling_info_, method_->GetProfilingInfo(pointer_size)); + Runtime::Current()->GetJit()->GetCodeCache()->DoneCompilerUse(method_, self_); + } + } + + ProfilingInfo* GetProfilingInfo() const { return profiling_info_; } + + private: + ArtMethod* const method_; + Thread* const self_; + ProfilingInfo* const profiling_info_; +}; + bool HInliner::TryInline(HInvoke* invoke_instruction) { if (invoke_instruction->IsInvokeUnresolved()) { return false; // Don't bother to move further if we know the method is unresolved. 
@@ -271,30 +300,32 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { // Check if we can use an inline cache. ArtMethod* caller = graph_->GetArtMethod(); - size_t pointer_size = class_linker->GetImagePointerSize(); - // Under JIT, we should always know the caller. - DCHECK(!Runtime::Current()->UseJit() || (caller != nullptr)); - if (caller != nullptr && caller->GetProfilingInfo(pointer_size) != nullptr) { - ProfilingInfo* profiling_info = caller->GetProfilingInfo(pointer_size); - const InlineCache& ic = *profiling_info->GetInlineCache(invoke_instruction->GetDexPc()); - if (ic.IsUnitialized()) { - VLOG(compiler) << "Interface or virtual call to " - << PrettyMethod(method_index, caller_dex_file) - << " is not hit and not inlined"; - return false; - } else if (ic.IsMonomorphic()) { - MaybeRecordStat(kMonomorphicCall); - return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic); - } else if (ic.IsPolymorphic()) { - MaybeRecordStat(kPolymorphicCall); - return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic); - } else { - DCHECK(ic.IsMegamorphic()); - VLOG(compiler) << "Interface or virtual call to " - << PrettyMethod(method_index, caller_dex_file) - << " is megamorphic and not inlined"; - MaybeRecordStat(kMegamorphicCall); - return false; + if (Runtime::Current()->UseJit()) { + // Under JIT, we should always know the caller. 
+ DCHECK(caller != nullptr); + ScopedProfilingInfoInlineUse spiis(caller, soa.Self()); + ProfilingInfo* profiling_info = spiis.GetProfilingInfo(); + if (profiling_info != nullptr) { + const InlineCache& ic = *profiling_info->GetInlineCache(invoke_instruction->GetDexPc()); + if (ic.IsUninitialized()) { + VLOG(compiler) << "Interface or virtual call to " + << PrettyMethod(method_index, caller_dex_file) + << " is not hit and not inlined"; + return false; + } else if (ic.IsMonomorphic()) { + MaybeRecordStat(kMonomorphicCall); + return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic); + } else if (ic.IsPolymorphic()) { + MaybeRecordStat(kPolymorphicCall); + return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic); + } else { + DCHECK(ic.IsMegamorphic()); + VLOG(compiler) << "Interface or virtual call to " + << PrettyMethod(method_index, caller_dex_file) + << " is megamorphic and not inlined"; + MaybeRecordStat(kMegamorphicCall); + return false; + } } } diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index f8a9a94e62..049901b882 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -70,6 +70,10 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor { void VisitGreaterThanOrEqual(HGreaterThanOrEqual* condition) OVERRIDE; void VisitLessThan(HLessThan* condition) OVERRIDE; void VisitLessThanOrEqual(HLessThanOrEqual* condition) OVERRIDE; + void VisitBelow(HBelow* condition) OVERRIDE; + void VisitBelowOrEqual(HBelowOrEqual* condition) OVERRIDE; + void VisitAbove(HAbove* condition) OVERRIDE; + void VisitAboveOrEqual(HAboveOrEqual* condition) OVERRIDE; void VisitDiv(HDiv* instruction) OVERRIDE; void VisitMul(HMul* instruction) OVERRIDE; void VisitNeg(HNeg* instruction) OVERRIDE; @@ -94,6 +98,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor { void SimplifyCompare(HInvoke* invoke, bool 
has_zero_op); void SimplifyIsNaN(HInvoke* invoke); void SimplifyFP2Int(HInvoke* invoke); + void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind); OptimizingCompilerStats* stats_; bool simplification_occurred_ = false; @@ -558,6 +563,36 @@ void InstructionSimplifierVisitor::VisitSuspendCheck(HSuspendCheck* check) { block->RemoveInstruction(check); } +static HCondition* GetOppositeConditionSwapOps(ArenaAllocator* arena, HInstruction* cond) { + HInstruction *lhs = cond->InputAt(0); + HInstruction *rhs = cond->InputAt(1); + switch (cond->GetKind()) { + case HInstruction::kEqual: + return new (arena) HEqual(rhs, lhs); + case HInstruction::kNotEqual: + return new (arena) HNotEqual(rhs, lhs); + case HInstruction::kLessThan: + return new (arena) HGreaterThan(rhs, lhs); + case HInstruction::kLessThanOrEqual: + return new (arena) HGreaterThanOrEqual(rhs, lhs); + case HInstruction::kGreaterThan: + return new (arena) HLessThan(rhs, lhs); + case HInstruction::kGreaterThanOrEqual: + return new (arena) HLessThanOrEqual(rhs, lhs); + case HInstruction::kBelow: + return new (arena) HAbove(rhs, lhs); + case HInstruction::kBelowOrEqual: + return new (arena) HAboveOrEqual(rhs, lhs); + case HInstruction::kAbove: + return new (arena) HBelow(rhs, lhs); + case HInstruction::kAboveOrEqual: + return new (arena) HBelowOrEqual(rhs, lhs); + default: + LOG(FATAL) << "Unknown ConditionType " << cond->GetKind(); + } + return nullptr; +} + void InstructionSimplifierVisitor::VisitEqual(HEqual* equal) { HInstruction* input_const = equal->GetConstantRight(); if (input_const != nullptr) { @@ -981,13 +1016,47 @@ void InstructionSimplifierVisitor::VisitLessThanOrEqual(HLessThanOrEqual* condit VisitCondition(condition); } -// TODO: unsigned comparisons too? 
+void InstructionSimplifierVisitor::VisitBelow(HBelow* condition) { + VisitCondition(condition); +} + +void InstructionSimplifierVisitor::VisitBelowOrEqual(HBelowOrEqual* condition) { + VisitCondition(condition); +} + +void InstructionSimplifierVisitor::VisitAbove(HAbove* condition) { + VisitCondition(condition); +} + +void InstructionSimplifierVisitor::VisitAboveOrEqual(HAboveOrEqual* condition) { + VisitCondition(condition); +} void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) { - // Try to fold an HCompare into this HCondition. + // Reverse condition if left is constant. Our code generators prefer constant + // on the right hand side. + if (condition->GetLeft()->IsConstant() && !condition->GetRight()->IsConstant()) { + HBasicBlock* block = condition->GetBlock(); + HCondition* replacement = GetOppositeConditionSwapOps(block->GetGraph()->GetArena(), condition); + // If it is a fp we must set the opposite bias. + if (replacement != nullptr) { + if (condition->IsLtBias()) { + replacement->SetBias(ComparisonBias::kGtBias); + } else if (condition->IsGtBias()) { + replacement->SetBias(ComparisonBias::kLtBias); + } + block->ReplaceAndRemoveInstructionWith(condition, replacement); + RecordSimplification(); + + condition = replacement; + } + } HInstruction* left = condition->GetLeft(); HInstruction* right = condition->GetRight(); + + // Try to fold an HCompare into this HCondition. + // We can only replace an HCondition which compares a Compare to 0. // Both 'dx' and 'jack' generate a compare to 0 when compiling a // condition with a long, float or double comparison as input. 
@@ -1594,6 +1663,12 @@ void InstructionSimplifierVisitor::SimplifyFP2Int(HInvoke* invoke) { invoke->ReplaceWithExceptInReplacementAtIndex(select, 0); // false at index 0 } +void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) { + uint32_t dex_pc = invoke->GetDexPc(); + HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc); + invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, mem_barrier); +} + void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) { switch (instruction->GetIntrinsic()) { case Intrinsics::kStringEquals: @@ -1626,6 +1701,15 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) { case Intrinsics::kDoubleDoubleToLongBits: SimplifyFP2Int(instruction); break; + case Intrinsics::kUnsafeLoadFence: + SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny); + break; + case Intrinsics::kUnsafeStoreFence: + SimplifyMemBarrier(instruction, MemBarrierKind::kAnyStore); + break; + case Intrinsics::kUnsafeFullFence: + SimplifyMemBarrier(instruction, MemBarrierKind::kAnyAny); + break; default: break; } diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc index db1f9a79aa..cd026b8770 100644 --- a/compiler/optimizing/instruction_simplifier_arm.cc +++ b/compiler/optimizing/instruction_simplifier_arm.cc @@ -26,5 +26,18 @@ void InstructionSimplifierArmVisitor::VisitMul(HMul* instruction) { } } +void InstructionSimplifierArmVisitor::VisitOr(HOr* instruction) { + if (TryMergeNegatedInput(instruction)) { + RecordSimplification(); + } +} + +void InstructionSimplifierArmVisitor::VisitAnd(HAnd* instruction) { + if (TryMergeNegatedInput(instruction)) { + RecordSimplification(); + } +} + + } // namespace arm } // namespace art diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h index 379b95d6ae..14c940eb21 100644 --- 
a/compiler/optimizing/instruction_simplifier_arm.h +++ b/compiler/optimizing/instruction_simplifier_arm.h @@ -36,6 +36,8 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor { } void VisitMul(HMul* instruction) OVERRIDE; + void VisitOr(HOr* instruction) OVERRIDE; + void VisitAnd(HAnd* instruction) OVERRIDE; OptimizingCompilerStats* stats_; }; diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc index c2bbdccc29..f00d960877 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.cc +++ b/compiler/optimizing/instruction_simplifier_arm64.cc @@ -180,51 +180,10 @@ bool InstructionSimplifierArm64Visitor::TryMergeIntoUsersShifterOperand(HInstruc return true; } -bool InstructionSimplifierArm64Visitor::TryMergeNegatedInput(HBinaryOperation* op) { - DCHECK(op->IsAnd() || op->IsOr() || op->IsXor()) << op->DebugName(); - HInstruction* left = op->GetLeft(); - HInstruction* right = op->GetRight(); - - // Only consider the case where there is exactly one Not, with 2 Not's De - // Morgan's laws should be applied instead. - if (left->IsNot() ^ right->IsNot()) { - HInstruction* hnot = (left->IsNot() ? left : right); - HInstruction* hother = (left->IsNot() ? right : left); - - // Only do the simplification if the Not has only one use and can thus be - // safely removed. 
Even though ARM64 negated bitwise operations do not have - // an immediate variant (only register), we still do the simplification when - // `hother` is a constant, because it removes an instruction if the constant - // cannot be encoded as an immediate: - // mov r0, #large_constant - // neg r2, r1 - // and r0, r0, r2 - // becomes: - // mov r0, #large_constant - // bic r0, r0, r1 - if (hnot->HasOnlyOneNonEnvironmentUse()) { - // Replace code looking like - // NOT tmp, mask - // AND dst, src, tmp (respectively ORR, EOR) - // with - // BIC dst, src, mask (respectively ORN, EON) - HInstruction* src = hnot->AsNot()->GetInput(); - - HArm64BitwiseNegatedRight* neg_op = new (GetGraph()->GetArena()) - HArm64BitwiseNegatedRight(op->GetType(), op->GetKind(), hother, src, op->GetDexPc()); - - op->GetBlock()->ReplaceAndRemoveInstructionWith(op, neg_op); - hnot->GetBlock()->RemoveInstruction(hnot); - RecordSimplification(); - return true; - } - } - - return false; -} - void InstructionSimplifierArm64Visitor::VisitAnd(HAnd* instruction) { - TryMergeNegatedInput(instruction); + if (TryMergeNegatedInput(instruction)) { + RecordSimplification(); + } } void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) { @@ -248,7 +207,9 @@ void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) { } void InstructionSimplifierArm64Visitor::VisitOr(HOr* instruction) { - TryMergeNegatedInput(instruction); + if (TryMergeNegatedInput(instruction)) { + RecordSimplification(); + } } void InstructionSimplifierArm64Visitor::VisitShl(HShl* instruction) { @@ -284,7 +245,9 @@ void InstructionSimplifierArm64Visitor::VisitUShr(HUShr* instruction) { } void InstructionSimplifierArm64Visitor::VisitXor(HXor* instruction) { - TryMergeNegatedInput(instruction); + if (TryMergeNegatedInput(instruction)) { + RecordSimplification(); + } } } // namespace arm64 diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h index 
cf8458713f..338120bbbc 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.h +++ b/compiler/optimizing/instruction_simplifier_arm64.h @@ -51,10 +51,6 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor { return TryMergeIntoShifterOperand(use, bitfield_op, true); } - // For bitwise operations (And/Or/Xor) with a negated input, try to use - // a negated bitwise instruction. - bool TryMergeNegatedInput(HBinaryOperation* op); - // HInstruction visitors, sorted alphabetically. void VisitAnd(HAnd* instruction) OVERRIDE; void VisitArrayGet(HArrayGet* instruction) OVERRIDE; diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc index 45d196fa6d..a11b5bd5c3 100644 --- a/compiler/optimizing/instruction_simplifier_shared.cc +++ b/compiler/optimizing/instruction_simplifier_shared.cc @@ -186,4 +186,47 @@ bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa) { return false; } + +bool TryMergeNegatedInput(HBinaryOperation* op) { + DCHECK(op->IsAnd() || op->IsOr() || op->IsXor()) << op->DebugName(); + HInstruction* left = op->GetLeft(); + HInstruction* right = op->GetRight(); + + // Only consider the case where there is exactly one Not, with 2 Not's De + // Morgan's laws should be applied instead. + if (left->IsNot() ^ right->IsNot()) { + HInstruction* hnot = (left->IsNot() ? left : right); + HInstruction* hother = (left->IsNot() ? right : left); + + // Only do the simplification if the Not has only one use and can thus be + // safely removed. 
Even though ARM64 negated bitwise operations do not have + // an immediate variant (only register), we still do the simplification when + // `hother` is a constant, because it removes an instruction if the constant + // cannot be encoded as an immediate: + // mov r0, #large_constant + // neg r2, r1 + // and r0, r0, r2 + // becomes: + // mov r0, #large_constant + // bic r0, r0, r1 + if (hnot->HasOnlyOneNonEnvironmentUse()) { + // Replace code looking like + // NOT tmp, mask + // AND dst, src, tmp (respectively ORR, EOR) + // with + // BIC dst, src, mask (respectively ORN, EON) + HInstruction* src = hnot->AsNot()->GetInput(); + + HBitwiseNegatedRight* neg_op = new (hnot->GetBlock()->GetGraph()->GetArena()) + HBitwiseNegatedRight(op->GetType(), op->GetKind(), hother, src, op->GetDexPc()); + + op->GetBlock()->ReplaceAndRemoveInstructionWith(op, neg_op); + hnot->GetBlock()->RemoveInstruction(hnot); + return true; + } + } + + return false; +} + } // namespace art diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h index 9832ecc058..b1fe8f4756 100644 --- a/compiler/optimizing/instruction_simplifier_shared.h +++ b/compiler/optimizing/instruction_simplifier_shared.h @@ -22,6 +22,9 @@ namespace art { bool TryCombineMultiplyAccumulate(HMul* mul, InstructionSet isa); +// For bitwise operations (And/Or/Xor) with a negated input, try to use +// a negated bitwise instruction. +bool TryMergeNegatedInput(HBinaryOperation* op); } // namespace art diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index 3ed0278871..5d4c4e2950 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -472,6 +472,24 @@ static Intrinsics GetIntrinsic(InlineMethod method) { break; } + // 1.8. 
+ case kIntrinsicUnsafeGetAndAddInt: + return Intrinsics::kUnsafeGetAndAddInt; + case kIntrinsicUnsafeGetAndAddLong: + return Intrinsics::kUnsafeGetAndAddLong; + case kIntrinsicUnsafeGetAndSetInt: + return Intrinsics::kUnsafeGetAndSetInt; + case kIntrinsicUnsafeGetAndSetLong: + return Intrinsics::kUnsafeGetAndSetLong; + case kIntrinsicUnsafeGetAndSetObject: + return Intrinsics::kUnsafeGetAndSetObject; + case kIntrinsicUnsafeLoadFence: + return Intrinsics::kUnsafeLoadFence; + case kIntrinsicUnsafeStoreFence: + return Intrinsics::kUnsafeStoreFence; + case kIntrinsicUnsafeFullFence: + return Intrinsics::kUnsafeFullFence; + // Virtual cases. case kIntrinsicReferenceGetReferent: diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index 2ab50bb436..3da82851a6 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -193,6 +193,49 @@ class SystemArrayCopyOptimizations : public IntrinsicOptimizations { #undef INTRISIC_OPTIMIZATION +// +// Macros for use in the intrinsics code generators. +// + +// Defines an unimplemented intrinsic: that is, a method call that is recognized as an +// intrinsic to exploit e.g. no side-effects or exceptions, but otherwise not handled +// by this architecture-specific intrinsics code generator. Eventually it is implemented +// as a true method call. +#define UNIMPLEMENTED_INTRINSIC(Arch, Name) \ +void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ +} \ +void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ +} + +// Defines a list of unreached intrinsics: that is, method calls that are recognized as +// an intrinsic, and then always converted into HIR instructions before they reach any +// architecture-specific intrinsics code generator. 
+#define UNREACHABLE_INTRINSIC(Arch, Name) \ +void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \ + LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \ + << " should have been converted to HIR"; \ +} \ +void IntrinsicCodeGenerator ## Arch::Visit ## Name(HInvoke* invoke) { \ + LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \ + << " should have been converted to HIR"; \ +} +#define UNREACHABLE_INTRINSICS(Arch) \ +UNREACHABLE_INTRINSIC(Arch, FloatFloatToIntBits) \ +UNREACHABLE_INTRINSIC(Arch, DoubleDoubleToLongBits) \ +UNREACHABLE_INTRINSIC(Arch, FloatIsNaN) \ +UNREACHABLE_INTRINSIC(Arch, DoubleIsNaN) \ +UNREACHABLE_INTRINSIC(Arch, IntegerRotateLeft) \ +UNREACHABLE_INTRINSIC(Arch, LongRotateLeft) \ +UNREACHABLE_INTRINSIC(Arch, IntegerRotateRight) \ +UNREACHABLE_INTRINSIC(Arch, LongRotateRight) \ +UNREACHABLE_INTRINSIC(Arch, IntegerCompare) \ +UNREACHABLE_INTRINSIC(Arch, LongCompare) \ +UNREACHABLE_INTRINSIC(Arch, IntegerSignum) \ +UNREACHABLE_INTRINSIC(Arch, LongSignum) \ +UNREACHABLE_INTRINSIC(Arch, UnsafeLoadFence) \ +UNREACHABLE_INTRINSIC(Arch, UnsafeStoreFence) \ +UNREACHABLE_INTRINSIC(Arch, UnsafeFullFence) + } // namespace art #endif // ART_COMPILER_OPTIMIZING_INTRINSICS_H_ diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index 276085ef30..4b94c94f39 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -1224,8 +1224,9 @@ void IntrinsicCodeGeneratorARM::VisitStringNewStringFromBytes(HInvoke* invoke) { __ LoadFromOffset( kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromBytes).Int32Value()); - codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>(); __ blx(LR); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } @@ -1251,8 +1252,9 @@ void 
IntrinsicCodeGeneratorARM::VisitStringNewStringFromChars(HInvoke* invoke) { // all include a null check on `data` before calling that method. __ LoadFromOffset( kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value()); - codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>(); __ blx(LR); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } void IntrinsicLocationsBuilderARM::VisitStringNewStringFromString(HInvoke* invoke) { @@ -1276,8 +1278,9 @@ void IntrinsicCodeGeneratorARM::VisitStringNewStringFromString(HInvoke* invoke) __ LoadFromOffset(kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromString).Int32Value()); - codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>(); __ blx(LR); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } @@ -1979,54 +1982,37 @@ void IntrinsicCodeGeneratorARM::VisitStringGetCharsNoCheck(HInvoke* invoke) { __ Bind(&done); } -// Unimplemented intrinsics. - -#define UNIMPLEMENTED_INTRINSIC(Name) \ -void IntrinsicLocationsBuilderARM::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} \ -void IntrinsicCodeGeneratorARM::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} - -UNIMPLEMENTED_INTRINSIC(IntegerBitCount) -UNIMPLEMENTED_INTRINSIC(LongBitCount) -UNIMPLEMENTED_INTRINSIC(MathMinDoubleDouble) -UNIMPLEMENTED_INTRINSIC(MathMinFloatFloat) -UNIMPLEMENTED_INTRINSIC(MathMaxDoubleDouble) -UNIMPLEMENTED_INTRINSIC(MathMaxFloatFloat) -UNIMPLEMENTED_INTRINSIC(MathMinLongLong) -UNIMPLEMENTED_INTRINSIC(MathMaxLongLong) -UNIMPLEMENTED_INTRINSIC(MathCeil) // Could be done by changing rounding mode, maybe? -UNIMPLEMENTED_INTRINSIC(MathFloor) // Could be done by changing rounding mode, maybe? 
-UNIMPLEMENTED_INTRINSIC(MathRint) -UNIMPLEMENTED_INTRINSIC(MathRoundDouble) // Could be done by changing rounding mode, maybe? -UNIMPLEMENTED_INTRINSIC(MathRoundFloat) // Could be done by changing rounding mode, maybe? -UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) // High register pressure. -UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) -UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) - -UNIMPLEMENTED_INTRINSIC(FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite) - -UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit) -UNIMPLEMENTED_INTRINSIC(LongHighestOneBit) -UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit) -UNIMPLEMENTED_INTRINSIC(LongLowestOneBit) - -// Handled as HIR instructions. -UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits) -UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits) -UNIMPLEMENTED_INTRINSIC(FloatIsNaN) -UNIMPLEMENTED_INTRINSIC(DoubleIsNaN) -UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft) -UNIMPLEMENTED_INTRINSIC(LongRotateLeft) -UNIMPLEMENTED_INTRINSIC(IntegerRotateRight) -UNIMPLEMENTED_INTRINSIC(LongRotateRight) -UNIMPLEMENTED_INTRINSIC(IntegerCompare) -UNIMPLEMENTED_INTRINSIC(LongCompare) -UNIMPLEMENTED_INTRINSIC(IntegerSignum) -UNIMPLEMENTED_INTRINSIC(LongSignum) - -#undef UNIMPLEMENTED_INTRINSIC +UNIMPLEMENTED_INTRINSIC(ARM, IntegerBitCount) +UNIMPLEMENTED_INTRINSIC(ARM, LongBitCount) +UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble) +UNIMPLEMENTED_INTRINSIC(ARM, MathMinFloatFloat) +UNIMPLEMENTED_INTRINSIC(ARM, MathMaxDoubleDouble) +UNIMPLEMENTED_INTRINSIC(ARM, MathMaxFloatFloat) +UNIMPLEMENTED_INTRINSIC(ARM, MathMinLongLong) +UNIMPLEMENTED_INTRINSIC(ARM, MathMaxLongLong) +UNIMPLEMENTED_INTRINSIC(ARM, MathCeil) // Could be done by changing rounding mode, maybe? +UNIMPLEMENTED_INTRINSIC(ARM, MathFloor) // Could be done by changing rounding mode, maybe? +UNIMPLEMENTED_INTRINSIC(ARM, MathRint) +UNIMPLEMENTED_INTRINSIC(ARM, MathRoundDouble) // Could be done by changing rounding mode, maybe? 
+UNIMPLEMENTED_INTRINSIC(ARM, MathRoundFloat) // Could be done by changing rounding mode, maybe? +UNIMPLEMENTED_INTRINSIC(ARM, UnsafeCASLong) // High register pressure. +UNIMPLEMENTED_INTRINSIC(ARM, SystemArrayCopyChar) +UNIMPLEMENTED_INTRINSIC(ARM, ReferenceGetReferent) +UNIMPLEMENTED_INTRINSIC(ARM, FloatIsInfinite) +UNIMPLEMENTED_INTRINSIC(ARM, DoubleIsInfinite) +UNIMPLEMENTED_INTRINSIC(ARM, IntegerHighestOneBit) +UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit) +UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit) +UNIMPLEMENTED_INTRINSIC(ARM, LongLowestOneBit) + +// 1.8. +UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddInt) +UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddLong) +UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndSetInt) +UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndSetLong) +UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndSetObject) + +UNREACHABLE_INTRINSICS(ARM) #undef __ diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 9f9d8c493f..5de2306506 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -46,6 +46,7 @@ using helpers::RegisterFrom; using helpers::SRegisterFrom; using helpers::WRegisterFrom; using helpers::XRegisterFrom; +using helpers::InputRegisterAt; namespace { @@ -367,6 +368,40 @@ void IntrinsicCodeGeneratorARM64::VisitLongReverse(HInvoke* invoke) { GenReverse(invoke->GetLocations(), Primitive::kPrimLong, GetVIXLAssembler()); } +static void GenBitCount(HInvoke* instr, bool is_long, vixl::MacroAssembler* masm) { + DCHECK(instr->GetType() == Primitive::kPrimInt); + DCHECK((is_long && instr->InputAt(0)->GetType() == Primitive::kPrimLong) || + (!is_long && instr->InputAt(0)->GetType() == Primitive::kPrimInt)); + + Location out = instr->GetLocations()->Out(); + UseScratchRegisterScope temps(masm); + + Register src = InputRegisterAt(instr, 0); + Register dst = is_long ? XRegisterFrom(out) : WRegisterFrom(out); + FPRegister fpr = is_long ? 
temps.AcquireD() : temps.AcquireS(); + + __ Fmov(fpr, src); + __ Cnt(fpr.V8B(), fpr.V8B()); + __ Addv(fpr.B(), fpr.V8B()); + __ Fmov(dst, fpr); +} + +void IntrinsicLocationsBuilderARM64::VisitLongBitCount(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM64::VisitLongBitCount(HInvoke* invoke) { + GenBitCount(invoke, /* is_long */ true, GetVIXLAssembler()); +} + +void IntrinsicLocationsBuilderARM64::VisitIntegerBitCount(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM64::VisitIntegerBitCount(HInvoke* invoke) { + GenBitCount(invoke, /* is_long */ false, GetVIXLAssembler()); +} + static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { LocationSummary* locations = new (arena) LocationSummary(invoke, LocationSummary::kNoCall, @@ -1374,8 +1409,9 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke) __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromBytes).Int32Value())); - codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>(); __ Blr(lr); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } @@ -1401,19 +1437,17 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke) // all include a null check on `data` before calling that method. __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value())); - codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>(); __ Blr(lr); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) { - // The inputs plus one temp. 
LocationSummary* locations = new (arena_) LocationSummary(invoke, LocationSummary::kCall, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1))); - locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2))); locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot)); } @@ -1429,8 +1463,9 @@ void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke __ Ldr(lr, MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromString).Int32Value())); - codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>(); __ Blr(lr); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } @@ -1672,43 +1707,260 @@ void IntrinsicCodeGeneratorARM64::VisitStringGetCharsNoCheck(HInvoke* invoke) { __ Bind(&done); } -// Unimplemented intrinsics. +// Mirrors ARRAYCOPY_SHORT_CHAR_ARRAY_THRESHOLD in libcore, so we can choose to use the native +// implementation there for longer copy lengths. 
+static constexpr int32_t kSystemArrayCopyThreshold = 32; -#define UNIMPLEMENTED_INTRINSIC(Name) \ -void IntrinsicLocationsBuilderARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} \ -void IntrinsicCodeGeneratorARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ +static void SetSystemArrayCopyLocationRequires(LocationSummary* locations, + uint32_t at, + HInstruction* input) { + HIntConstant* const_input = input->AsIntConstant(); + if (const_input != nullptr && !vixl::Assembler::IsImmAddSub(const_input->GetValue())) { + locations->SetInAt(at, Location::RequiresRegister()); + } else { + locations->SetInAt(at, Location::RegisterOrConstant(input)); + } } -UNIMPLEMENTED_INTRINSIC(IntegerBitCount) -UNIMPLEMENTED_INTRINSIC(LongBitCount) -UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) -UNIMPLEMENTED_INTRINSIC(SystemArrayCopy) -UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) +void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopyChar(HInvoke* invoke) { + // Check to see if we have known failures that will cause us to have to bail out + // to the runtime, and just generate the runtime call directly. + HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant(); + HIntConstant* dst_pos = invoke->InputAt(3)->AsIntConstant(); + + // The positions must be non-negative. + if ((src_pos != nullptr && src_pos->GetValue() < 0) || + (dst_pos != nullptr && dst_pos->GetValue() < 0)) { + // We will have to fail anyways. + return; + } + + // The length must be >= 0 and not so long that we would (currently) prefer libcore's + // native implementation. + HIntConstant* length = invoke->InputAt(4)->AsIntConstant(); + if (length != nullptr) { + int32_t len = length->GetValue(); + if (len < 0 || len > kSystemArrayCopyThreshold) { + // Just call as normal. 
+ return; + } + } + + ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena(); + LocationSummary* locations = new (allocator) LocationSummary(invoke, + LocationSummary::kCallOnSlowPath, + kIntrinsified); + // arraycopy(char[] src, int src_pos, char[] dst, int dst_pos, int length). + locations->SetInAt(0, Location::RequiresRegister()); + SetSystemArrayCopyLocationRequires(locations, 1, invoke->InputAt(1)); + locations->SetInAt(2, Location::RequiresRegister()); + SetSystemArrayCopyLocationRequires(locations, 3, invoke->InputAt(3)); + SetSystemArrayCopyLocationRequires(locations, 4, invoke->InputAt(4)); -UNIMPLEMENTED_INTRINSIC(FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite) + locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); +} + +static void CheckSystemArrayCopyPosition(vixl::MacroAssembler* masm, + const Location& pos, + const Register& input, + const Location& length, + SlowPathCodeARM64* slow_path, + const Register& input_len, + const Register& temp, + bool length_is_input_length = false) { + const int32_t length_offset = mirror::Array::LengthOffset().Int32Value(); + if (pos.IsConstant()) { + int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue(); + if (pos_const == 0) { + if (!length_is_input_length) { + // Check that length(input) >= length. + __ Ldr(temp, MemOperand(input, length_offset)); + __ Cmp(temp, OperandFrom(length, Primitive::kPrimInt)); + __ B(slow_path->GetEntryLabel(), lt); + } + } else { + // Check that length(input) >= pos. + __ Ldr(input_len, MemOperand(input, length_offset)); + __ Subs(temp, input_len, pos_const); + __ B(slow_path->GetEntryLabel(), lt); + + // Check that (length(input) - pos) >= length. + __ Cmp(temp, OperandFrom(length, Primitive::kPrimInt)); + __ B(slow_path->GetEntryLabel(), lt); + } + } else if (length_is_input_length) { + // The only way the copy can succeed is if pos is zero. 
+ __ Cbnz(WRegisterFrom(pos), slow_path->GetEntryLabel()); + } else { + // Check that pos >= 0. + Register pos_reg = WRegisterFrom(pos); + __ Tbnz(pos_reg, pos_reg.size() - 1, slow_path->GetEntryLabel()); + + // Check that pos <= length(input) && (length(input) - pos) >= length. + __ Ldr(temp, MemOperand(input, length_offset)); + __ Subs(temp, temp, pos_reg); + // Ccmp if length(input) >= pos, else definitely bail to slow path (N!=V == lt). + __ Ccmp(temp, OperandFrom(length, Primitive::kPrimInt), NFlag, ge); + __ B(slow_path->GetEntryLabel(), lt); + } +} + +// Compute base source address, base destination address, and end source address +// for System.arraycopy* intrinsics. +static void GenSystemArrayCopyAddresses(vixl::MacroAssembler* masm, + Primitive::Type type, + const Register& src, + const Location& src_pos, + const Register& dst, + const Location& dst_pos, + const Location& copy_length, + const Register& src_base, + const Register& dst_base, + const Register& src_end) { + DCHECK(type == Primitive::kPrimNot || type == Primitive::kPrimChar) + << "Unexpected element type: " + << type; + const int32_t char_size = Primitive::ComponentSize(type); + const int32_t char_size_shift = Primitive::ComponentSizeShift(type); + + uint32_t offset = mirror::Array::DataOffset(char_size).Uint32Value(); + if (src_pos.IsConstant()) { + int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue(); + __ Add(src_base, src, char_size * constant + offset); + } else { + __ Add(src_base, src, offset); + __ Add(src_base, + src_base, + Operand(XRegisterFrom(src_pos), LSL, char_size_shift)); + } + + if (dst_pos.IsConstant()) { + int32_t constant = dst_pos.GetConstant()->AsIntConstant()->GetValue(); + __ Add(dst_base, dst, char_size * constant + offset); + } else { + __ Add(dst_base, dst, offset); + __ Add(dst_base, + dst_base, + Operand(XRegisterFrom(dst_pos), LSL, char_size_shift)); + } + + if (copy_length.IsConstant()) { + int32_t constant = 
copy_length.GetConstant()->AsIntConstant()->GetValue(); + __ Add(src_end, src_base, char_size * constant); + } else { + __ Add(src_end, + src_base, + Operand(XRegisterFrom(copy_length), LSL, char_size_shift)); + } +} + +void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopyChar(HInvoke* invoke) { + vixl::MacroAssembler* masm = GetVIXLAssembler(); + LocationSummary* locations = invoke->GetLocations(); + Register src = XRegisterFrom(locations->InAt(0)); + Location src_pos = locations->InAt(1); + Register dst = XRegisterFrom(locations->InAt(2)); + Location dst_pos = locations->InAt(3); + Location length = locations->InAt(4); + + SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke); + codegen_->AddSlowPath(slow_path); + + // If source and destination are the same, take the slow path. Overlapping copy regions must be + // copied in reverse and we can't know in all cases if it's needed. + __ Cmp(src, dst); + __ B(slow_path->GetEntryLabel(), eq); + + // Bail out if the source is null. + __ Cbz(src, slow_path->GetEntryLabel()); + + // Bail out if the destination is null. + __ Cbz(dst, slow_path->GetEntryLabel()); + + if (!length.IsConstant()) { + // If the length is negative, bail out. + __ Tbnz(WRegisterFrom(length), kWRegSize - 1, slow_path->GetEntryLabel()); + // If the length > 32 then (currently) prefer libcore's native implementation. + __ Cmp(WRegisterFrom(length), kSystemArrayCopyThreshold); + __ B(slow_path->GetEntryLabel(), gt); + } else { + // We have already checked in the LocationsBuilder for the constant case. 
+ DCHECK_GE(length.GetConstant()->AsIntConstant()->GetValue(), 0); + DCHECK_LE(length.GetConstant()->AsIntConstant()->GetValue(), 32); + } + + Register src_curr_addr = WRegisterFrom(locations->GetTemp(0)); + Register dst_curr_addr = WRegisterFrom(locations->GetTemp(1)); + Register src_stop_addr = WRegisterFrom(locations->GetTemp(2)); + + CheckSystemArrayCopyPosition(masm, + src_pos, + src, + length, + slow_path, + src_curr_addr, + dst_curr_addr, + false); + + CheckSystemArrayCopyPosition(masm, + dst_pos, + dst, + length, + slow_path, + src_curr_addr, + dst_curr_addr, + false); + + src_curr_addr = src_curr_addr.X(); + dst_curr_addr = dst_curr_addr.X(); + src_stop_addr = src_stop_addr.X(); + + GenSystemArrayCopyAddresses(masm, + Primitive::kPrimChar, + src, + src_pos, + dst, + dst_pos, + length, + src_curr_addr, + dst_curr_addr, + src_stop_addr); + + // Iterate over the arrays and do a raw copy of the chars. + const int32_t char_size = Primitive::ComponentSize(Primitive::kPrimChar); + UseScratchRegisterScope temps(masm); + Register tmp = temps.AcquireW(); + vixl::Label loop, done; + __ Bind(&loop); + __ Cmp(src_curr_addr, src_stop_addr); + __ B(&done, eq); + __ Ldrh(tmp, MemOperand(src_curr_addr, char_size, vixl::PostIndex)); + __ Strh(tmp, MemOperand(dst_curr_addr, char_size, vixl::PostIndex)); + __ B(&loop); + __ Bind(&done); + + __ Bind(slow_path->GetExitLabel()); +} -UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit) -UNIMPLEMENTED_INTRINSIC(LongHighestOneBit) -UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit) -UNIMPLEMENTED_INTRINSIC(LongLowestOneBit) +UNIMPLEMENTED_INTRINSIC(ARM64, SystemArrayCopy) +UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent) +UNIMPLEMENTED_INTRINSIC(ARM64, FloatIsInfinite) +UNIMPLEMENTED_INTRINSIC(ARM64, DoubleIsInfinite) +UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit) +UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit) +UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit) +UNIMPLEMENTED_INTRINSIC(ARM64, LongLowestOneBit) -// Handled as HIR 
instructions. -UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits) -UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits) -UNIMPLEMENTED_INTRINSIC(FloatIsNaN) -UNIMPLEMENTED_INTRINSIC(DoubleIsNaN) -UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft) -UNIMPLEMENTED_INTRINSIC(LongRotateLeft) -UNIMPLEMENTED_INTRINSIC(IntegerRotateRight) -UNIMPLEMENTED_INTRINSIC(LongRotateRight) -UNIMPLEMENTED_INTRINSIC(IntegerCompare) -UNIMPLEMENTED_INTRINSIC(LongCompare) -UNIMPLEMENTED_INTRINSIC(IntegerSignum) -UNIMPLEMENTED_INTRINSIC(LongSignum) +// 1.8. +UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddInt) +UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddLong) +UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndSetInt) +UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndSetLong) +UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndSetObject) -#undef UNIMPLEMENTED_INTRINSIC +UNREACHABLE_INTRINSICS(ARM64) #undef __ diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h index e1aea924cf..dd9294d486 100644 --- a/compiler/optimizing/intrinsics_list.h +++ b/compiler/optimizing/intrinsics_list.h @@ -19,7 +19,7 @@ // All intrinsics supported by the optimizing compiler. Format is name, then whether it is expected // to be a HInvokeStaticOrDirect node (compared to HInvokeVirtual), then whether it requires an -// environment. +// environment, may have side effects, or may throw exceptions. 
#define INTRINSICS_LIST(V) \ V(DoubleDoubleToRawLongBits, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow) \ @@ -128,6 +128,14 @@ V(UnsafePutLong, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ V(UnsafePutLongOrdered, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ V(UnsafePutLongVolatile, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ + V(UnsafeGetAndAddInt, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ + V(UnsafeGetAndAddLong, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ + V(UnsafeGetAndSetInt, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ + V(UnsafeGetAndSetLong, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ + V(UnsafeGetAndSetObject, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ + V(UnsafeLoadFence, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ + V(UnsafeStoreFence, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ + V(UnsafeFullFence, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) \ V(ReferenceGetReferent, kDirect, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow) #endif // ART_COMPILER_OPTIMIZING_INTRINSICS_LIST_H_ diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc index a737d8100a..c306cf93a1 100644 --- a/compiler/optimizing/intrinsics_mips.cc +++ b/compiler/optimizing/intrinsics_mips.cc @@ -490,7 +490,6 @@ void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) static void GenNumberOfTrailingZeroes(LocationSummary* locations, bool is64bit, bool isR6, - bool isR2OrNewer, MipsAssembler* assembler) { Register out = locations->Out().AsRegister<Register>(); Register in_lo; @@ -503,7 +502,7 @@ static void GenNumberOfTrailingZeroes(LocationSummary* locations, // If in_lo is zero then count the number of trailing zeroes in in_hi; // otherwise count the number of trailing zeroes in in_lo. 
- // AT = in_lo ? in_lo : in_hi; + // out = in_lo ? in_lo : in_hi; if (isR6) { __ Seleqz(out, in_hi, in_lo); __ Selnez(TMP, in_lo, in_lo); @@ -522,50 +521,26 @@ static void GenNumberOfTrailingZeroes(LocationSummary* locations, in_lo = in; } - // We don't have an instruction to count the number of trailing zeroes. - // Start by flipping the bits end-for-end so we can count the number of - // leading zeroes instead. - if (isR2OrNewer) { + if (isR6) { + // We don't have an instruction to count the number of trailing zeroes. + // Start by flipping the bits end-for-end so we can count the number of + // leading zeroes instead. __ Rotr(out, in, 16); __ Wsbh(out, out); - } else { - // MIPS32r1 - // __ Rotr(out, in, 16); - __ Sll(TMP, in, 16); - __ Srl(out, in, 16); - __ Or(out, out, TMP); - // __ Wsbh(out, out); - __ LoadConst32(AT, 0x00FF00FF); - __ And(TMP, out, AT); - __ Sll(TMP, TMP, 8); - __ Srl(out, out, 8); - __ And(out, out, AT); - __ Or(out, out, TMP); - } - - if (isR6) { __ Bitswap(out, out); __ ClzR6(out, out); } else { - __ LoadConst32(AT, 0x0F0F0F0F); - __ And(TMP, out, AT); - __ Sll(TMP, TMP, 4); - __ Srl(out, out, 4); - __ And(out, out, AT); - __ Or(out, TMP, out); - __ LoadConst32(AT, 0x33333333); - __ And(TMP, out, AT); - __ Sll(TMP, TMP, 2); - __ Srl(out, out, 2); - __ And(out, out, AT); - __ Or(out, TMP, out); - __ LoadConst32(AT, 0x55555555); - __ And(TMP, out, AT); - __ Sll(TMP, TMP, 1); - __ Srl(out, out, 1); - __ And(out, out, AT); - __ Or(out, TMP, out); + // Convert trailing zeroes to trailing ones, and bits to their left + // to zeroes. + __ Addiu(TMP, in, -1); + __ Xor(out, TMP, in); + __ And(out, out, TMP); + // Count number of leading zeroes. __ ClzR2(out, out); + // Subtract number of leading zeroes from 32 to get number of trailing ones. + // Remember that the trailing ones were formerly trailing zeroes. 
+ __ LoadConst32(TMP, 32); + __ Subu(out, TMP, out); } if (is64bit) { @@ -587,11 +562,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* i } void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeroes(invoke->GetLocations(), - /* is64bit */ false, - IsR6(), - IsR2OrNewer(), - GetAssembler()); + GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler()); } // int java.lang.Long.numberOfTrailingZeros(long i) @@ -600,207 +571,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invo } void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeroes(invoke->GetLocations(), - /* is64bit */ true, - IsR6(), - IsR2OrNewer(), - GetAssembler()); -} - -enum RotationDirection { - kRotateRight, - kRotateLeft, -}; - -static void GenRotate(HInvoke* invoke, - Primitive::Type type, - bool isR2OrNewer, - RotationDirection direction, - MipsAssembler* assembler) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); - - LocationSummary* locations = invoke->GetLocations(); - if (invoke->InputAt(1)->IsIntConstant()) { - int32_t shift = static_cast<int32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()); - if (type == Primitive::kPrimInt) { - Register in = locations->InAt(0).AsRegister<Register>(); - Register out = locations->Out().AsRegister<Register>(); - - shift &= 0x1f; - if (direction == kRotateLeft) { - shift = (32 - shift) & 0x1F; - } - - if (isR2OrNewer) { - if ((shift != 0) || (out != in)) { - __ Rotr(out, in, shift); - } - } else { - if (shift == 0) { - if (out != in) { - __ Move(out, in); - } - } else { - __ Srl(AT, in, shift); - __ Sll(out, in, 32 - shift); - __ Or(out, out, AT); - } - } - } else { // Primitive::kPrimLong - Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); - Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); - 
Register out_lo = locations->Out().AsRegisterPairLow<Register>(); - Register out_hi = locations->Out().AsRegisterPairHigh<Register>(); - - shift &= 0x3f; - if (direction == kRotateLeft) { - shift = (64 - shift) & 0x3F; - } - - if (shift == 0) { - __ Move(out_lo, in_lo); - __ Move(out_hi, in_hi); - } else if (shift == 32) { - __ Move(out_lo, in_hi); - __ Move(out_hi, in_lo); - } else if (shift < 32) { - __ Srl(AT, in_lo, shift); - __ Sll(out_lo, in_hi, 32 - shift); - __ Or(out_lo, out_lo, AT); - __ Srl(AT, in_hi, shift); - __ Sll(out_hi, in_lo, 32 - shift); - __ Or(out_hi, out_hi, AT); - } else { - __ Sll(AT, in_lo, 64 - shift); - __ Srl(out_lo, in_hi, shift - 32); - __ Or(out_lo, out_lo, AT); - __ Sll(AT, in_hi, 64 - shift); - __ Srl(out_hi, in_lo, shift - 32); - __ Or(out_hi, out_hi, AT); - } - } - } else { // !invoke->InputAt(1)->IsIntConstant() - Register shamt = locations->InAt(1).AsRegister<Register>(); - if (type == Primitive::kPrimInt) { - Register in = locations->InAt(0).AsRegister<Register>(); - Register out = locations->Out().AsRegister<Register>(); - - if (isR2OrNewer) { - if (direction == kRotateRight) { - __ Rotrv(out, in, shamt); - } else { - // negu tmp, shamt - __ Subu(TMP, ZERO, shamt); - __ Rotrv(out, in, TMP); - } - } else { - if (direction == kRotateRight) { - __ Srlv(AT, in, shamt); - __ Subu(TMP, ZERO, shamt); - __ Sllv(out, in, TMP); - __ Or(out, out, AT); - } else { - __ Sllv(AT, in, shamt); - __ Subu(TMP, ZERO, shamt); - __ Srlv(out, in, TMP); - __ Or(out, out, AT); - } - } - } else { // Primitive::kPrimLong - Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); - Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); - Register out_lo = locations->Out().AsRegisterPairLow<Register>(); - Register out_hi = locations->Out().AsRegisterPairHigh<Register>(); - - MipsLabel done; - - if (direction == kRotateRight) { - __ Nor(TMP, ZERO, shamt); - __ Srlv(AT, in_lo, shamt); - __ Sll(out_lo, in_hi, 1); - __ Sllv(out_lo, 
out_lo, TMP); - __ Or(out_lo, out_lo, AT); - __ Srlv(AT, in_hi, shamt); - __ Sll(out_hi, in_lo, 1); - __ Sllv(out_hi, out_hi, TMP); - __ Or(out_hi, out_hi, AT); - } else { - __ Nor(TMP, ZERO, shamt); - __ Sllv(AT, in_lo, shamt); - __ Srl(out_lo, in_hi, 1); - __ Srlv(out_lo, out_lo, TMP); - __ Or(out_lo, out_lo, AT); - __ Sllv(AT, in_hi, shamt); - __ Srl(out_hi, in_lo, 1); - __ Srlv(out_hi, out_hi, TMP); - __ Or(out_hi, out_hi, AT); - } - - __ Andi(TMP, shamt, 32); - __ Beqz(TMP, &done); - __ Move(TMP, out_hi); - __ Move(out_hi, out_lo); - __ Move(out_lo, TMP); - - __ Bind(&done); - } - } -} - -// int java.lang.Integer.rotateRight(int i, int distance) -void IntrinsicLocationsBuilderMIPS::VisitIntegerRotateRight(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); - locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); -} - -void IntrinsicCodeGeneratorMIPS::VisitIntegerRotateRight(HInvoke* invoke) { - GenRotate(invoke, Primitive::kPrimInt, IsR2OrNewer(), kRotateRight, GetAssembler()); -} - -// long java.lang.Long.rotateRight(long i, int distance) -void IntrinsicLocationsBuilderMIPS::VisitLongRotateRight(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); - locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); -} - -void IntrinsicCodeGeneratorMIPS::VisitLongRotateRight(HInvoke* invoke) { - GenRotate(invoke, Primitive::kPrimLong, IsR2OrNewer(), kRotateRight, GetAssembler()); -} - -// int java.lang.Integer.rotateLeft(int i, int distance) -void IntrinsicLocationsBuilderMIPS::VisitIntegerRotateLeft(HInvoke* invoke) 
{ - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); - locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); -} - -void IntrinsicCodeGeneratorMIPS::VisitIntegerRotateLeft(HInvoke* invoke) { - GenRotate(invoke, Primitive::kPrimInt, IsR2OrNewer(), kRotateLeft, GetAssembler()); -} - -// long java.lang.Long.rotateLeft(long i, int distance) -void IntrinsicLocationsBuilderMIPS::VisitLongRotateLeft(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); - locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); -} - -void IntrinsicCodeGeneratorMIPS::VisitLongRotateLeft(HInvoke* invoke) { - GenRotate(invoke, Primitive::kPrimLong, IsR2OrNewer(), kRotateLeft, GetAssembler()); + GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler()); } // int java.lang.Integer.reverse(int) @@ -839,6 +610,142 @@ static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) { locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap); } +static void GenBitCount(LocationSummary* locations, + Primitive::Type type, + bool isR6, + MipsAssembler* assembler) { + DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + + Register out = locations->Out().AsRegister<Register>(); + + // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel + // + // A generalization of the best bit counting method to integers of + // bit-widths up to 128 (parameterized by type T) is this: + // + // v = v - ((v >> 1) & (T)~(T)0/3); // temp + // v = (v & (T)~(T)0/15*3) + ((v >> 2) & 
(T)~(T)0/15*3); // temp + // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp + // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; // count + // + // For comparison, for 32-bit quantities, this algorithm can be executed + // using 20 MIPS instructions (the calls to LoadConst32() generate two + // machine instructions each for the values being used in this algorithm). + // A(n unrolled) loop-based algorithm required 25 instructions. + // + // For 64-bit quantities, this algorithm gets executed twice, (once + // for in_lo, and again for in_hi), but saves a few instructions + // because the mask values only have to be loaded once. Using this + // algorithm the count for a 64-bit operand can be performed in 33 + // instructions compared to a loop-based algorithm which required 47 + // instructions. + + if (type == Primitive::kPrimInt) { + Register in = locations->InAt(0).AsRegister<Register>(); + + __ Srl(TMP, in, 1); + __ LoadConst32(AT, 0x55555555); + __ And(TMP, TMP, AT); + __ Subu(TMP, in, TMP); + __ LoadConst32(AT, 0x33333333); + __ And(out, TMP, AT); + __ Srl(TMP, TMP, 2); + __ And(TMP, TMP, AT); + __ Addu(TMP, out, TMP); + __ Srl(out, TMP, 4); + __ Addu(out, out, TMP); + __ LoadConst32(AT, 0x0F0F0F0F); + __ And(out, out, AT); + __ LoadConst32(TMP, 0x01010101); + if (isR6) { + __ MulR6(out, out, TMP); + } else { + __ MulR2(out, out, TMP); + } + __ Srl(out, out, 24); + } else if (type == Primitive::kPrimLong) { + Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); + Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); + Register tmp_hi = locations->GetTemp(0).AsRegister<Register>(); + Register out_hi = locations->GetTemp(1).AsRegister<Register>(); + Register tmp_lo = TMP; + Register out_lo = out; + + __ Srl(tmp_lo, in_lo, 1); + __ Srl(tmp_hi, in_hi, 1); + + __ LoadConst32(AT, 0x55555555); + + __ And(tmp_lo, tmp_lo, AT); + __ Subu(tmp_lo, in_lo, tmp_lo); + + __ And(tmp_hi, tmp_hi, AT); + __ Subu(tmp_hi, in_hi, tmp_hi); + + 
__ LoadConst32(AT, 0x33333333); + + __ And(out_lo, tmp_lo, AT); + __ Srl(tmp_lo, tmp_lo, 2); + __ And(tmp_lo, tmp_lo, AT); + __ Addu(tmp_lo, out_lo, tmp_lo); + __ Srl(out_lo, tmp_lo, 4); + __ Addu(out_lo, out_lo, tmp_lo); + + __ And(out_hi, tmp_hi, AT); + __ Srl(tmp_hi, tmp_hi, 2); + __ And(tmp_hi, tmp_hi, AT); + __ Addu(tmp_hi, out_hi, tmp_hi); + __ Srl(out_hi, tmp_hi, 4); + __ Addu(out_hi, out_hi, tmp_hi); + + __ LoadConst32(AT, 0x0F0F0F0F); + + __ And(out_lo, out_lo, AT); + __ And(out_hi, out_hi, AT); + + __ LoadConst32(AT, 0x01010101); + + if (isR6) { + __ MulR6(out_lo, out_lo, AT); + + __ MulR6(out_hi, out_hi, AT); + } else { + __ MulR2(out_lo, out_lo, AT); + + __ MulR2(out_hi, out_hi, AT); + } + + __ Srl(out_lo, out_lo, 24); + __ Srl(out_hi, out_hi, 24); + + __ Addu(out, out_hi, out_lo); + } +} + +// int java.lang.Integer.bitCount(int) +void IntrinsicLocationsBuilderMIPS::VisitIntegerBitCount(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) { + GenBitCount(invoke->GetLocations(), Primitive::kPrimInt, IsR6(), GetAssembler()); +} + +// int java.lang.Long.bitCount(int) +void IntrinsicLocationsBuilderMIPS::VisitLongBitCount(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); + locations->AddTemp(Location::RequiresRegister()); +} + +void IntrinsicCodeGeneratorMIPS::VisitLongBitCount(HInvoke* invoke) { + GenBitCount(invoke->GetLocations(), Primitive::kPrimLong, IsR6(), GetAssembler()); +} + static void MathAbsFP(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) { FRegister in = locations->InAt(0).AsFpuRegister<FRegister>(); FRegister out = locations->Out().AsFpuRegister<FRegister>(); @@ -1550,6 +1457,24 @@ void 
IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) { } } +// Thread java.lang.Thread.currentThread() +void IntrinsicLocationsBuilderMIPS::VisitThreadCurrentThread(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kNoCall, + kIntrinsified); + locations->SetOut(Location::RequiresRegister()); +} + +void IntrinsicCodeGeneratorMIPS::VisitThreadCurrentThread(HInvoke* invoke) { + MipsAssembler* assembler = GetAssembler(); + Register out = invoke->GetLocations()->Out().AsRegister<Register>(); + + __ LoadFromOffset(kLoadWord, + out, + TR, + Thread::PeerOffset<kMipsPointerSize>().Int32Value()); +} + // char java.lang.String.charAt(int index) void IntrinsicLocationsBuilderMIPS::VisitStringCharAt(HInvoke* invoke) { LocationSummary* locations = new (arena_) LocationSummary(invoke, @@ -1557,7 +1482,9 @@ void IntrinsicLocationsBuilderMIPS::VisitStringCharAt(HInvoke* invoke) { kIntrinsified); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); - locations->SetOut(Location::SameAsFirstInput()); + // The inputs will be considered live at the last instruction and restored. This will overwrite + // the output with kNoOutputOverlap. 
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); } void IntrinsicCodeGeneratorMIPS::VisitStringCharAt(HInvoke* invoke) { @@ -1596,6 +1523,40 @@ void IntrinsicCodeGeneratorMIPS::VisitStringCharAt(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +// int java.lang.String.compareTo(String anotherString) +void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); +} + +void IntrinsicCodeGeneratorMIPS::VisitStringCompareTo(HInvoke* invoke) { + MipsAssembler* assembler = GetAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + // Note that the null check must have been done earlier. 
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); + + Register argument = locations->InAt(1).AsRegister<Register>(); + SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke); + codegen_->AddSlowPath(slow_path); + __ Beqz(argument, slow_path->GetEntryLabel()); + + __ LoadFromOffset(kLoadWord, + T9, + TR, + QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, + pStringCompareTo).Int32Value()); + __ Jalr(T9); + __ Nop(); + __ Bind(slow_path->GetExitLabel()); +} + // boolean java.lang.String.equals(Object anObject) void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) { LocationSummary* locations = new (arena_) LocationSummary(invoke, @@ -1698,90 +1659,439 @@ void IntrinsicCodeGeneratorMIPS::VisitStringEquals(HInvoke* invoke) { __ Bind(&end); } +static void GenerateStringIndexOf(HInvoke* invoke, + bool start_at_zero, + MipsAssembler* assembler, + CodeGeneratorMIPS* codegen, + ArenaAllocator* allocator) { + LocationSummary* locations = invoke->GetLocations(); + Register tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<Register>() : TMP; + + // Note that the null check must have been done earlier. + DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0))); + + // Check for code points > 0xFFFF. Either a slow-path check when we + // don't know statically, or directly dispatch if we have a constant. + SlowPathCodeMIPS* slow_path = nullptr; + if (invoke->InputAt(1)->IsIntConstant()) { + if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) { + // Always needs the slow-path. We could directly dispatch to it, + // but this case should be rare, so for simplicity just put the + // full slow-path down and branch unconditionally. 
+ slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke); + codegen->AddSlowPath(slow_path); + __ B(slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); + return; + } + } else { + Register char_reg = locations->InAt(1).AsRegister<Register>(); + // The "bltu" conditional branch tests to see if the character value + // fits in a valid 16-bit (MIPS halfword) value. If it doesn't then + // the character being searched for, if it exists in the string, is + // encoded using UTF-16 and stored in the string as two (16-bit) + // halfwords. Currently the assembly code used to implement this + // intrinsic doesn't support searching for a character stored as + // two halfwords so we fallback to using the generic implementation + // of indexOf(). + __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max()); + slow_path = new (allocator) IntrinsicSlowPathMIPS(invoke); + codegen->AddSlowPath(slow_path); + __ Bltu(tmp_reg, char_reg, slow_path->GetEntryLabel()); + } + + if (start_at_zero) { + DCHECK_EQ(tmp_reg, A2); + // Start-index = 0. + __ Clear(tmp_reg); + } + + __ LoadFromOffset(kLoadWord, + T9, + TR, + QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pIndexOf).Int32Value()); + __ Jalr(T9); + __ Nop(); + + if (slow_path != nullptr) { + __ Bind(slow_path->GetExitLabel()); + } +} + +// int java.lang.String.indexOf(int ch) +void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + // We have a hand-crafted assembly stub that follows the runtime + // calling convention. So it's best to align the inputs accordingly. 
+ InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); + + // Need a temp for slow-path codepoint compare, and need to send start-index=0. + locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2))); +} + +void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) { + GenerateStringIndexOf(invoke, + /* start_at_zero */ true, + GetAssembler(), + codegen_, + GetAllocator()); +} + +// int java.lang.String.indexOf(int ch, int fromIndex) +void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + // We have a hand-crafted assembly stub that follows the runtime + // calling convention. So it's best to align the inputs accordingly. + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); + + // Need a temp for slow-path codepoint compare. 
+ locations->AddTemp(Location::RequiresRegister()); +} + +void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) { + GenerateStringIndexOf(invoke, + /* start_at_zero */ false, + GetAssembler(), + codegen_, + GetAllocator()); +} + +// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount) +void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); + locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); +} + +void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) { + MipsAssembler* assembler = GetAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + Register byte_array = locations->InAt(0).AsRegister<Register>(); + SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke); + codegen_->AddSlowPath(slow_path); + __ Beqz(byte_array, slow_path->GetEntryLabel()); + + __ LoadFromOffset(kLoadWord, + T9, + TR, + QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromBytes).Int32Value()); + __ Jalr(T9); + __ Nop(); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + __ Bind(slow_path->GetExitLabel()); +} + +// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data) +void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromChars(HInvoke* invoke) { + LocationSummary* 
locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); + locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); +} + +void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromChars(HInvoke* invoke) { + MipsAssembler* assembler = GetAssembler(); + + // No need to emit code checking whether `locations->InAt(2)` is a null + // pointer, as callers of the native method + // + // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data) + // + // all include a null check on `data` before calling that method. 
+ + __ LoadFromOffset(kLoadWord, + T9, + TR, + QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromChars).Int32Value()); + __ Jalr(T9); + __ Nop(); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); +} + +// java.lang.StringFactory.newStringFromString(String toCopy) +void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) { + LocationSummary* locations = new (arena_) LocationSummary(invoke, + LocationSummary::kCall, + kIntrinsified); + InvokeRuntimeCallingConvention calling_convention; + locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); + Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); + locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>())); +} + +void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromString(HInvoke* invoke) { + MipsAssembler* assembler = GetAssembler(); + LocationSummary* locations = invoke->GetLocations(); + + Register string_to_copy = locations->InAt(0).AsRegister<Register>(); + SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke); + codegen_->AddSlowPath(slow_path); + __ Beqz(string_to_copy, slow_path->GetEntryLabel()); + + __ LoadFromOffset(kLoadWord, + T9, + TR, + QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pAllocStringFromString).Int32Value()); + __ Jalr(T9); + __ Nop(); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); + __ Bind(slow_path->GetExitLabel()); +} + +static void GenIsInfinite(LocationSummary* locations, + const Primitive::Type type, + const bool isR6, + MipsAssembler* assembler) { + FRegister in = locations->InAt(0).AsFpuRegister<FRegister>(); + Register out = locations->Out().AsRegister<Register>(); + + DCHECK(type == Primitive::kPrimFloat || type == Primitive::kPrimDouble); + + if (isR6) { + if (type == Primitive::kPrimDouble) { + __ ClassD(FTMP, in); + } else { + __ ClassS(FTMP, in); + } + __ Mfc1(out, FTMP); + __ Andi(out, out, kPositiveInfinity | 
kNegativeInfinity); + __ Sltu(out, ZERO, out); + } else { + // If one, or more, of the exponent bits is zero, then the number can't be infinite. + if (type == Primitive::kPrimDouble) { + __ MoveFromFpuHigh(TMP, in); + __ LoadConst32(AT, 0x7FF00000); + } else { + __ Mfc1(TMP, in); + __ LoadConst32(AT, 0x7F800000); + } + __ Xor(TMP, TMP, AT); + + __ Sll(TMP, TMP, 1); + + if (type == Primitive::kPrimDouble) { + __ Mfc1(AT, in); + __ Or(TMP, TMP, AT); + } + // If any of the significand bits are one, then the number is not infinite. + __ Sltiu(out, TMP, 1); + } +} + +// boolean java.lang.Float.isInfinite(float) +void IntrinsicLocationsBuilderMIPS::VisitFloatIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) { + GenIsInfinite(invoke->GetLocations(), Primitive::kPrimFloat, IsR6(), GetAssembler()); +} + +// boolean java.lang.Double.isInfinite(double) +void IntrinsicLocationsBuilderMIPS::VisitDoubleIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS::VisitDoubleIsInfinite(HInvoke* invoke) { + GenIsInfinite(invoke->GetLocations(), Primitive::kPrimDouble, IsR6(), GetAssembler()); +} + +static void GenHighestOneBit(LocationSummary* locations, + const Primitive::Type type, + bool isR6, + MipsAssembler* assembler) { + DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + + if (type == Primitive::kPrimLong) { + Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); + Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); + Register out_lo = locations->Out().AsRegisterPairLow<Register>(); + Register out_hi = locations->Out().AsRegisterPairHigh<Register>(); + + if (isR6) { + __ ClzR6(TMP, in_hi); + } else { + __ ClzR2(TMP, in_hi); + } + __ LoadConst32(AT, 0x80000000); + __ Srlv(out_hi, AT, TMP); + __ And(out_hi, out_hi, in_hi); + if (isR6) { + __ ClzR6(TMP, in_lo); + } else { + __ 
ClzR2(TMP, in_lo); + } + __ Srlv(out_lo, AT, TMP); + __ And(out_lo, out_lo, in_lo); + if (isR6) { + __ Seleqz(out_lo, out_lo, out_hi); + } else { + __ Movn(out_lo, ZERO, out_hi); + } + } else { + Register in = locations->InAt(0).AsRegister<Register>(); + Register out = locations->Out().AsRegister<Register>(); + + if (isR6) { + __ ClzR6(TMP, in); + } else { + __ ClzR2(TMP, in); + } + __ LoadConst32(AT, 0x80000000); + __ Srlv(AT, AT, TMP); // Srlv shifts in the range of [0;31] bits (lower 5 bits of arg). + __ And(out, AT, in); // So this is required for 0 (=shift by 32). + } +} + +// int java.lang.Integer.highestOneBit(int) +void IntrinsicLocationsBuilderMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) { + GenHighestOneBit(invoke->GetLocations(), Primitive::kPrimInt, IsR6(), GetAssembler()); +} + +// long java.lang.Long.highestOneBit(long) +void IntrinsicLocationsBuilderMIPS::VisitLongHighestOneBit(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap); +} + +void IntrinsicCodeGeneratorMIPS::VisitLongHighestOneBit(HInvoke* invoke) { + GenHighestOneBit(invoke->GetLocations(), Primitive::kPrimLong, IsR6(), GetAssembler()); +} + +static void GenLowestOneBit(LocationSummary* locations, + const Primitive::Type type, + bool isR6, + MipsAssembler* assembler) { + DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); + + if (type == Primitive::kPrimLong) { + Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>(); + Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>(); + Register out_lo = locations->Out().AsRegisterPairLow<Register>(); + Register out_hi = locations->Out().AsRegisterPairHigh<Register>(); + + __ Subu(TMP, ZERO, in_lo); + __ And(out_lo, TMP, in_lo); + __ Subu(TMP, ZERO, in_hi); + __ And(out_hi, TMP, in_hi); + if (isR6) { + __ Seleqz(out_hi, out_hi, out_lo); + } else { 
+ __ Movn(out_hi, ZERO, out_lo); + } + } else { + Register in = locations->InAt(0).AsRegister<Register>(); + Register out = locations->Out().AsRegister<Register>(); + + __ Subu(TMP, ZERO, in); + __ And(out, TMP, in); + } +} + +// int java.lang.Integer.lowestOneBit(int) +void IntrinsicLocationsBuilderMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) { + GenLowestOneBit(invoke->GetLocations(), Primitive::kPrimInt, IsR6(), GetAssembler()); +} + +// long java.lang.Long.lowestOneBit(long) +void IntrinsicLocationsBuilderMIPS::VisitLongLowestOneBit(HInvoke* invoke) { + CreateIntToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) { + GenLowestOneBit(invoke->GetLocations(), Primitive::kPrimLong, IsR6(), GetAssembler()); +} + // Unimplemented intrinsics. -#define UNIMPLEMENTED_INTRINSIC(Name) \ -void IntrinsicLocationsBuilderMIPS::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} \ -void IntrinsicCodeGeneratorMIPS::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} - -UNIMPLEMENTED_INTRINSIC(IntegerBitCount) -UNIMPLEMENTED_INTRINSIC(LongBitCount) - -UNIMPLEMENTED_INTRINSIC(MathCeil) -UNIMPLEMENTED_INTRINSIC(MathFloor) -UNIMPLEMENTED_INTRINSIC(MathRint) -UNIMPLEMENTED_INTRINSIC(MathRoundDouble) -UNIMPLEMENTED_INTRINSIC(MathRoundFloat) -UNIMPLEMENTED_INTRINSIC(ThreadCurrentThread) -UNIMPLEMENTED_INTRINSIC(UnsafeGet) -UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafeGetLong) -UNIMPLEMENTED_INTRINSIC(UnsafeGetLongVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafeGetObject) -UNIMPLEMENTED_INTRINSIC(UnsafeGetObjectVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafePut) -UNIMPLEMENTED_INTRINSIC(UnsafePutOrdered) -UNIMPLEMENTED_INTRINSIC(UnsafePutVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafePutObject) -UNIMPLEMENTED_INTRINSIC(UnsafePutObjectOrdered) 
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafePutLong) -UNIMPLEMENTED_INTRINSIC(UnsafePutLongOrdered) -UNIMPLEMENTED_INTRINSIC(UnsafePutLongVolatile) -UNIMPLEMENTED_INTRINSIC(UnsafeCASInt) -UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) -UNIMPLEMENTED_INTRINSIC(UnsafeCASObject) -UNIMPLEMENTED_INTRINSIC(StringCompareTo) -UNIMPLEMENTED_INTRINSIC(StringIndexOf) -UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter) -UNIMPLEMENTED_INTRINSIC(StringNewStringFromBytes) -UNIMPLEMENTED_INTRINSIC(StringNewStringFromChars) -UNIMPLEMENTED_INTRINSIC(StringNewStringFromString) - -UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) -UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) -UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) -UNIMPLEMENTED_INTRINSIC(SystemArrayCopy) - -UNIMPLEMENTED_INTRINSIC(MathCos) -UNIMPLEMENTED_INTRINSIC(MathSin) -UNIMPLEMENTED_INTRINSIC(MathAcos) -UNIMPLEMENTED_INTRINSIC(MathAsin) -UNIMPLEMENTED_INTRINSIC(MathAtan) -UNIMPLEMENTED_INTRINSIC(MathAtan2) -UNIMPLEMENTED_INTRINSIC(MathCbrt) -UNIMPLEMENTED_INTRINSIC(MathCosh) -UNIMPLEMENTED_INTRINSIC(MathExp) -UNIMPLEMENTED_INTRINSIC(MathExpm1) -UNIMPLEMENTED_INTRINSIC(MathHypot) -UNIMPLEMENTED_INTRINSIC(MathLog) -UNIMPLEMENTED_INTRINSIC(MathLog10) -UNIMPLEMENTED_INTRINSIC(MathNextAfter) -UNIMPLEMENTED_INTRINSIC(MathSinh) -UNIMPLEMENTED_INTRINSIC(MathTan) -UNIMPLEMENTED_INTRINSIC(MathTanh) - -UNIMPLEMENTED_INTRINSIC(FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite) - -UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit) -UNIMPLEMENTED_INTRINSIC(LongHighestOneBit) -UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit) -UNIMPLEMENTED_INTRINSIC(LongLowestOneBit) - -// Handled as HIR instructions. 
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits) -UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits) -UNIMPLEMENTED_INTRINSIC(FloatIsNaN) -UNIMPLEMENTED_INTRINSIC(DoubleIsNaN) -UNIMPLEMENTED_INTRINSIC(IntegerCompare) -UNIMPLEMENTED_INTRINSIC(LongCompare) -UNIMPLEMENTED_INTRINSIC(IntegerSignum) -UNIMPLEMENTED_INTRINSIC(LongSignum) - -#undef UNIMPLEMENTED_INTRINSIC +UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil) +UNIMPLEMENTED_INTRINSIC(MIPS, MathFloor) +UNIMPLEMENTED_INTRINSIC(MIPS, MathRint) +UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble) +UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundFloat) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGet) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetVolatile) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLong) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLongVolatile) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObject) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetObjectVolatile) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePut) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutOrdered) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutVolatile) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObject) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectOrdered) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutObjectVolatile) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLong) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongOrdered) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongVolatile) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASInt) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASObject) + +UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent) +UNIMPLEMENTED_INTRINSIC(MIPS, StringGetCharsNoCheck) +UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopyChar) +UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy) + +UNIMPLEMENTED_INTRINSIC(MIPS, MathCos) +UNIMPLEMENTED_INTRINSIC(MIPS, MathSin) +UNIMPLEMENTED_INTRINSIC(MIPS, MathAcos) +UNIMPLEMENTED_INTRINSIC(MIPS, MathAsin) +UNIMPLEMENTED_INTRINSIC(MIPS, MathAtan) +UNIMPLEMENTED_INTRINSIC(MIPS, MathAtan2) +UNIMPLEMENTED_INTRINSIC(MIPS, MathCbrt) +UNIMPLEMENTED_INTRINSIC(MIPS, 
MathCosh) +UNIMPLEMENTED_INTRINSIC(MIPS, MathExp) +UNIMPLEMENTED_INTRINSIC(MIPS, MathExpm1) +UNIMPLEMENTED_INTRINSIC(MIPS, MathHypot) +UNIMPLEMENTED_INTRINSIC(MIPS, MathLog) +UNIMPLEMENTED_INTRINSIC(MIPS, MathLog10) +UNIMPLEMENTED_INTRINSIC(MIPS, MathNextAfter) +UNIMPLEMENTED_INTRINSIC(MIPS, MathSinh) +UNIMPLEMENTED_INTRINSIC(MIPS, MathTan) +UNIMPLEMENTED_INTRINSIC(MIPS, MathTanh) + +// 1.8. +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddInt) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddLong) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong) +UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject) + +UNREACHABLE_INTRINSICS(MIPS) #undef __ diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index a7a2560043..cf973aa841 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -340,130 +340,6 @@ void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invok GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); } -static void GenRotateRight(HInvoke* invoke, - Primitive::Type type, - Mips64Assembler* assembler) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); - - LocationSummary* locations = invoke->GetLocations(); - GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); - GpuRegister out = locations->Out().AsRegister<GpuRegister>(); - - if (invoke->InputAt(1)->IsIntConstant()) { - uint32_t shift = static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()); - if (type == Primitive::kPrimInt) { - shift &= 0x1f; - __ Rotr(out, in, shift); - } else { - shift &= 0x3f; - if (shift < 32) { - __ Drotr(out, in, shift); - } else { - shift &= 0x1f; - __ Drotr32(out, in, shift); - } - } - } else { - GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>(); - if (type == Primitive::kPrimInt) { - __ Rotrv(out, in, shamt); - } else { - __ 
Drotrv(out, in, shamt); - } - } -} - -// int java.lang.Integer.rotateRight(int i, int distance) -void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateRight(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); - locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); -} - -void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateRight(HInvoke* invoke) { - GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler()); -} - -// long java.lang.Long.rotateRight(long i, int distance) -void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); - locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); -} - -void IntrinsicCodeGeneratorMIPS64::VisitLongRotateRight(HInvoke* invoke) { - GenRotateRight(invoke, Primitive::kPrimLong, GetAssembler()); -} - -static void GenRotateLeft(HInvoke* invoke, - Primitive::Type type, - Mips64Assembler* assembler) { - DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong); - - LocationSummary* locations = invoke->GetLocations(); - GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>(); - GpuRegister out = locations->Out().AsRegister<GpuRegister>(); - - if (invoke->InputAt(1)->IsIntConstant()) { - int32_t shift = -static_cast<int32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()); - if (type == Primitive::kPrimInt) { - shift &= 0x1f; - __ Rotr(out, in, shift); - } else { - shift &= 0x3f; - if (shift < 32) { - __ Drotr(out, in, shift); - } else { - shift &= 0x1f; - __ Drotr32(out, in, shift); - } - } - } else { - 
GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>(); - if (type == Primitive::kPrimInt) { - __ Subu(TMP, ZERO, shamt); - __ Rotrv(out, in, TMP); - } else { - __ Dsubu(TMP, ZERO, shamt); - __ Drotrv(out, in, TMP); - } - } -} - -// int java.lang.Integer.rotateLeft(int i, int distance) -void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); - locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); -} - -void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateLeft(HInvoke* invoke) { - GenRotateLeft(invoke, Primitive::kPrimInt, GetAssembler()); -} - -// long java.lang.Long.rotateLeft(long i, int distance) -void IntrinsicLocationsBuilderMIPS64::VisitLongRotateLeft(HInvoke* invoke) { - LocationSummary* locations = new (arena_) LocationSummary(invoke, - LocationSummary::kNoCall, - kIntrinsified); - locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1))); - locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); -} - -void IntrinsicCodeGeneratorMIPS64::VisitLongRotateLeft(HInvoke* invoke) { - GenRotateLeft(invoke, Primitive::kPrimLong, GetAssembler()); -} - static void GenReverse(LocationSummary* locations, Primitive::Type type, Mips64Assembler* assembler) { @@ -1480,10 +1356,10 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) { __ Beqzc(argument, slow_path->GetEntryLabel()); __ LoadFromOffset(kLoadDoubleword, - TMP, + T9, TR, QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pStringCompareTo).Int32Value()); - __ Jalr(TMP); + __ Jalr(T9); __ Nop(); __ Bind(slow_path->GetExitLabel()); } @@ -1627,17 +1503,14 @@ static void GenerateStringIndexOf(HInvoke* invoke, 
DCHECK_EQ(tmp_reg, A2); // Start-index = 0. __ Clear(tmp_reg); - } else { - __ Slt(TMP, A2, ZERO); // if fromIndex < 0 - __ Seleqz(A2, A2, TMP); // fromIndex = 0 } __ LoadFromOffset(kLoadDoubleword, - TMP, + T9, TR, QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pIndexOf).Int32Value()); CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>(); - __ Jalr(TMP); + __ Jalr(T9); __ Nop(); if (slow_path != nullptr) { @@ -1710,13 +1583,14 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke __ Beqzc(byte_array, slow_path->GetEntryLabel()); __ LoadFromOffset(kLoadDoubleword, - TMP, + T9, TR, QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pAllocStringFromBytes).Int32Value()); - codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); - __ Jalr(TMP); + CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>(); + __ Jalr(T9); __ Nop(); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } @@ -1743,24 +1617,23 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke // // all include a null check on `data` before calling that method. 
__ LoadFromOffset(kLoadDoubleword, - TMP, + T9, TR, QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pAllocStringFromChars).Int32Value()); - codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); - __ Jalr(TMP); + CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>(); + __ Jalr(T9); __ Nop(); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } -// java.lang.String.String(String original) +// java.lang.StringFactory.newStringFromString(String toCopy) void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) { LocationSummary* locations = new (arena_) LocationSummary(invoke, LocationSummary::kCall, kIntrinsified); InvokeRuntimeCallingConvention calling_convention; locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0))); - locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1))); - locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2))); Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt); locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>())); } @@ -1775,72 +1648,93 @@ void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invok __ Beqzc(string_to_copy, slow_path->GetEntryLabel()); __ LoadFromOffset(kLoadDoubleword, - TMP, + T9, TR, QUICK_ENTRYPOINT_OFFSET(kMips64DoublewordSize, pAllocStringFromString).Int32Value()); - codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); - __ Jalr(TMP); + CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>(); + __ Jalr(T9); __ Nop(); + codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } -// Unimplemented intrinsics. 
- -#define UNIMPLEMENTED_INTRINSIC(Name) \ -void IntrinsicLocationsBuilderMIPS64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} \ -void IntrinsicCodeGeneratorMIPS64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} - -UNIMPLEMENTED_INTRINSIC(IntegerBitCount) -UNIMPLEMENTED_INTRINSIC(LongBitCount) - -UNIMPLEMENTED_INTRINSIC(MathRoundDouble) -UNIMPLEMENTED_INTRINSIC(MathRoundFloat) - -UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) -UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck) -UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar) -UNIMPLEMENTED_INTRINSIC(SystemArrayCopy) - -UNIMPLEMENTED_INTRINSIC(MathCos) -UNIMPLEMENTED_INTRINSIC(MathSin) -UNIMPLEMENTED_INTRINSIC(MathAcos) -UNIMPLEMENTED_INTRINSIC(MathAsin) -UNIMPLEMENTED_INTRINSIC(MathAtan) -UNIMPLEMENTED_INTRINSIC(MathAtan2) -UNIMPLEMENTED_INTRINSIC(MathCbrt) -UNIMPLEMENTED_INTRINSIC(MathCosh) -UNIMPLEMENTED_INTRINSIC(MathExp) -UNIMPLEMENTED_INTRINSIC(MathExpm1) -UNIMPLEMENTED_INTRINSIC(MathHypot) -UNIMPLEMENTED_INTRINSIC(MathLog) -UNIMPLEMENTED_INTRINSIC(MathLog10) -UNIMPLEMENTED_INTRINSIC(MathNextAfter) -UNIMPLEMENTED_INTRINSIC(MathSinh) -UNIMPLEMENTED_INTRINSIC(MathTan) -UNIMPLEMENTED_INTRINSIC(MathTanh) - -UNIMPLEMENTED_INTRINSIC(FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite) - -UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit) -UNIMPLEMENTED_INTRINSIC(LongHighestOneBit) -UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit) -UNIMPLEMENTED_INTRINSIC(LongLowestOneBit) - -// Handled as HIR instructions. 
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits) -UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits) -UNIMPLEMENTED_INTRINSIC(FloatIsNaN) -UNIMPLEMENTED_INTRINSIC(DoubleIsNaN) -UNIMPLEMENTED_INTRINSIC(IntegerCompare) -UNIMPLEMENTED_INTRINSIC(LongCompare) -UNIMPLEMENTED_INTRINSIC(IntegerSignum) -UNIMPLEMENTED_INTRINSIC(LongSignum) - -#undef UNIMPLEMENTED_INTRINSIC +static void GenIsInfinite(LocationSummary* locations, + bool is64bit, + Mips64Assembler* assembler) { + FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>(); + GpuRegister out = locations->Out().AsRegister<GpuRegister>(); + + if (is64bit) { + __ ClassD(FTMP, in); + } else { + __ ClassS(FTMP, in); + } + __ Mfc1(out, FTMP); + __ Andi(out, out, kPositiveInfinity | kNegativeInfinity); + __ Sltu(out, ZERO, out); +} + +// boolean java.lang.Float.isInfinite(float) +void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) { + GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); +} + +// boolean java.lang.Double.isInfinite(double) +void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) { + GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); +} + +UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerBitCount) +UNIMPLEMENTED_INTRINSIC(MIPS64, LongBitCount) + +UNIMPLEMENTED_INTRINSIC(MIPS64, MathRoundDouble) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathRoundFloat) + +UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent) +UNIMPLEMENTED_INTRINSIC(MIPS64, StringGetCharsNoCheck) +UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopyChar) +UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy) + +UNIMPLEMENTED_INTRINSIC(MIPS64, MathCos) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathSin) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathAcos) 
+UNIMPLEMENTED_INTRINSIC(MIPS64, MathAsin) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathAtan) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathAtan2) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathCbrt) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathCosh) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathExp) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathExpm1) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathHypot) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathLog) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathLog10) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathNextAfter) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathSinh) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathTan) +UNIMPLEMENTED_INTRINSIC(MIPS64, MathTanh) + +UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerHighestOneBit) +UNIMPLEMENTED_INTRINSIC(MIPS64, LongHighestOneBit) +UNIMPLEMENTED_INTRINSIC(MIPS64, IntegerLowestOneBit) +UNIMPLEMENTED_INTRINSIC(MIPS64, LongLowestOneBit) + +// 1.8. +UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddInt) +UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddLong) +UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt) +UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong) +UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject) + +UNREACHABLE_INTRINSICS(MIPS64) #undef __ diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index bfa4e374f7..95fdb9b3f6 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -1546,6 +1546,7 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) { __ j(kEqual, slow_path->GetEntryLabel()); __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes))); + CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>(); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } @@ -1571,6 +1572,7 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) { // // all include a null check on `data` before calling that method. 
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars))); + CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>(); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } @@ -1594,6 +1596,7 @@ void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke) __ j(kEqual, slow_path->GetEntryLabel()); __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString))); + CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>(); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } @@ -2627,41 +2630,24 @@ void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true); } -// Unimplemented intrinsics. - -#define UNIMPLEMENTED_INTRINSIC(Name) \ -void IntrinsicLocationsBuilderX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} \ -void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} - -UNIMPLEMENTED_INTRINSIC(MathRoundDouble) -UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) -UNIMPLEMENTED_INTRINSIC(SystemArrayCopy) - -UNIMPLEMENTED_INTRINSIC(FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite) - -UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit) -UNIMPLEMENTED_INTRINSIC(LongHighestOneBit) -UNIMPLEMENTED_INTRINSIC(IntegerLowestOneBit) -UNIMPLEMENTED_INTRINSIC(LongLowestOneBit) - -// Handled as HIR instructions. 
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits) -UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits) -UNIMPLEMENTED_INTRINSIC(FloatIsNaN) -UNIMPLEMENTED_INTRINSIC(DoubleIsNaN) -UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft) -UNIMPLEMENTED_INTRINSIC(LongRotateLeft) -UNIMPLEMENTED_INTRINSIC(IntegerRotateRight) -UNIMPLEMENTED_INTRINSIC(LongRotateRight) -UNIMPLEMENTED_INTRINSIC(IntegerCompare) -UNIMPLEMENTED_INTRINSIC(LongCompare) -UNIMPLEMENTED_INTRINSIC(IntegerSignum) -UNIMPLEMENTED_INTRINSIC(LongSignum) - -#undef UNIMPLEMENTED_INTRINSIC +UNIMPLEMENTED_INTRINSIC(X86, MathRoundDouble) +UNIMPLEMENTED_INTRINSIC(X86, ReferenceGetReferent) +UNIMPLEMENTED_INTRINSIC(X86, SystemArrayCopy) +UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite) +UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite) +UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit) +UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit) +UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit) +UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit) + +// 1.8. +UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddInt) +UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddLong) +UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndSetInt) +UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndSetLong) +UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndSetObject) + +UNREACHABLE_INTRINSICS(X86) #undef __ diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 15c399712d..9e568f7b4f 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -1641,6 +1641,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromBytes(HInvoke* invoke __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromBytes), /* no_rip */ true)); + CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>(); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } @@ -1667,6 +1668,7 @@ void 
IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke // all include a null check on `data` before calling that method. __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars), /* no_rip */ true)); + CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>(); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } @@ -1691,6 +1693,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromString(HInvoke* invok __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromString), /* no_rip */ true)); + CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>(); codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); __ Bind(slow_path->GetExitLabel()); } @@ -2711,34 +2714,18 @@ void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invok GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true); } -// Unimplemented intrinsics. +UNIMPLEMENTED_INTRINSIC(X86_64, ReferenceGetReferent) +UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite) +UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite) -#define UNIMPLEMENTED_INTRINSIC(Name) \ -void IntrinsicLocationsBuilderX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} \ -void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \ -} - -UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent) - -UNIMPLEMENTED_INTRINSIC(FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite) - -// Handled as HIR instructions. 
-UNIMPLEMENTED_INTRINSIC(FloatFloatToIntBits) -UNIMPLEMENTED_INTRINSIC(DoubleDoubleToLongBits) -UNIMPLEMENTED_INTRINSIC(FloatIsNaN) -UNIMPLEMENTED_INTRINSIC(DoubleIsNaN) -UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft) -UNIMPLEMENTED_INTRINSIC(LongRotateLeft) -UNIMPLEMENTED_INTRINSIC(IntegerRotateRight) -UNIMPLEMENTED_INTRINSIC(LongRotateRight) -UNIMPLEMENTED_INTRINSIC(IntegerCompare) -UNIMPLEMENTED_INTRINSIC(LongCompare) -UNIMPLEMENTED_INTRINSIC(IntegerSignum) -UNIMPLEMENTED_INTRINSIC(LongSignum) +// 1.8. +UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddInt) +UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddLong) +UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndSetInt) +UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndSetLong) +UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndSetObject) -#undef UNIMPLEMENTED_INTRINSIC +UNREACHABLE_INTRINSICS(X86_64) #undef __ diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 0e0b83e4b4..98766a31a6 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -127,6 +127,9 @@ void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) { // Remove the block from the list of blocks, so that further analyses // never see it. 
blocks_[i] = nullptr; + if (block->IsExitBlock()) { + SetExitBlock(nullptr); + } } } } @@ -1870,7 +1873,7 @@ void HGraph::DeleteDeadEmptyBlock(HBasicBlock* block) { DCHECK(block->GetPhis().IsEmpty()); if (block->IsExitBlock()) { - exit_block_ = nullptr; + SetExitBlock(nullptr); } RemoveElement(reverse_post_order_, block); @@ -2178,7 +2181,9 @@ static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo uppe DCHECK(upper_bound_rti.IsSupertypeOf(rti)) << " upper_bound_rti: " << upper_bound_rti << " rti: " << rti; - DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact()); + DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact()) + << " upper_bound_rti: " << upper_bound_rti + << " rti: " << rti; } } @@ -2212,6 +2217,10 @@ ReferenceTypeInfo ReferenceTypeInfo::Create(TypeHandle type_handle, bool is_exac if (kIsDebugBuild) { ScopedObjectAccess soa(Thread::Current()); DCHECK(IsValidHandle(type_handle)); + if (!is_exact) { + DCHECK(!type_handle->CannotBeAssignedFromOtherTypes()) + << "Callers of ReferenceTypeInfo::Create should ensure is_exact is properly computed"; + } } return ReferenceTypeInfo(type_handle, is_exact); } diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index b684cc697f..1bb5f5df51 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -26,6 +26,7 @@ #include "base/arena_object.h" #include "base/stl_util.h" #include "dex/compiler_enums.h" +#include "dex_instruction-inl.h" #include "entrypoints/quick/quick_entrypoints_enum.h" #include "handle.h" #include "handle_scope.h" @@ -508,6 +509,8 @@ class HGraph : public ArenaObject<kArenaAllocGraph> { // before cursor. 
HInstruction* InsertOppositeCondition(HInstruction* cond, HInstruction* cursor); + ReferenceTypeInfo GetInexactObjectRti() const { return inexact_object_rti_; } + private: void RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const; void RemoveDeadBlocks(const ArenaBitVector& visited); @@ -1264,6 +1267,7 @@ class HLoopInformationOutwardIterator : public ValueObject { #define FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M) #else #define FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M) \ + M(BitwiseNegatedRight, Instruction) \ M(MultiplyAccumulate, Instruction) #endif @@ -1278,7 +1282,6 @@ class HLoopInformationOutwardIterator : public ValueObject { #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) #else #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M) \ - M(Arm64BitwiseNegatedRight, Instruction) \ M(Arm64DataProcWithShifterOp, Instruction) \ M(Arm64IntermediateAddress, Instruction) #endif @@ -2963,6 +2966,8 @@ class HCondition : public HBinaryOperation { virtual IfCondition GetOppositeCondition() const = 0; bool IsGtBias() const { return GetBias() == ComparisonBias::kGtBias; } + bool IsLtBias() const { return GetBias() == ComparisonBias::kLtBias; } + ComparisonBias GetBias() const { return GetPackedField<ComparisonBiasField>(); } void SetBias(ComparisonBias bias) { SetPackedField<ComparisonBiasField>(bias); } @@ -2973,13 +2978,23 @@ class HCondition : public HBinaryOperation { bool IsFPConditionTrueIfNaN() const { DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); IfCondition if_cond = GetCondition(); - return IsGtBias() ? 
((if_cond == kCondGT) || (if_cond == kCondGE)) : (if_cond == kCondNE); + if (if_cond == kCondNE) { + return true; + } else if (if_cond == kCondEQ) { + return false; + } + return ((if_cond == kCondGT) || (if_cond == kCondGE)) && IsGtBias(); } bool IsFPConditionFalseIfNaN() const { DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType(); IfCondition if_cond = GetCondition(); - return IsGtBias() ? ((if_cond == kCondLT) || (if_cond == kCondLE)) : (if_cond == kCondEQ); + if (if_cond == kCondEQ) { + return true; + } else if (if_cond == kCondNE) { + return false; + } + return ((if_cond == kCondLT) || (if_cond == kCondLE)) && IsGtBias(); } protected: diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h index 75a71e78b8..173852a55d 100644 --- a/compiler/optimizing/nodes_arm64.h +++ b/compiler/optimizing/nodes_arm64.h @@ -118,66 +118,6 @@ class HArm64IntermediateAddress : public HExpression<2> { DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress); }; -class HArm64BitwiseNegatedRight : public HBinaryOperation { - public: - HArm64BitwiseNegatedRight(Primitive::Type result_type, - InstructionKind op, - HInstruction* left, - HInstruction* right, - uint32_t dex_pc = kNoDexPc) - : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc), - op_kind_(op) { - DCHECK(op == HInstruction::kAnd || op == HInstruction::kOr || op == HInstruction::kXor) << op; - } - - template <typename T, typename U> - auto Compute(T x, U y) const -> decltype(x & ~y) { - static_assert(std::is_same<decltype(x & ~y), decltype(x | ~y)>::value && - std::is_same<decltype(x & ~y), decltype(x ^ ~y)>::value, - "Inconsistent negated bitwise types"); - switch (op_kind_) { - case HInstruction::kAnd: - return x & ~y; - case HInstruction::kOr: - return x | ~y; - case HInstruction::kXor: - return x ^ ~y; - default: - LOG(FATAL) << "Unreachable"; - UNREACHABLE(); - } - } - - HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { - 
return GetBlock()->GetGraph()->GetIntConstant( - Compute(x->GetValue(), y->GetValue()), GetDexPc()); - } - HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { - return GetBlock()->GetGraph()->GetLongConstant( - Compute(x->GetValue(), y->GetValue()), GetDexPc()); - } - HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, - HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { - LOG(FATAL) << DebugName() << " is not defined for float values"; - UNREACHABLE(); - } - HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, - HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { - LOG(FATAL) << DebugName() << " is not defined for double values"; - UNREACHABLE(); - } - - InstructionKind GetOpKind() const { return op_kind_; } - - DECLARE_INSTRUCTION(Arm64BitwiseNegatedRight); - - private: - // Specifies the bitwise operation, which will be then negated. - const InstructionKind op_kind_; - - DISALLOW_COPY_AND_ASSIGN(HArm64BitwiseNegatedRight); -}; - } // namespace art #endif // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_ diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h index b04b622838..c10c718ff4 100644 --- a/compiler/optimizing/nodes_shared.h +++ b/compiler/optimizing/nodes_shared.h @@ -53,6 +53,66 @@ class HMultiplyAccumulate : public HExpression<3> { DISALLOW_COPY_AND_ASSIGN(HMultiplyAccumulate); }; +class HBitwiseNegatedRight : public HBinaryOperation { + public: + HBitwiseNegatedRight(Primitive::Type result_type, + InstructionKind op, + HInstruction* left, + HInstruction* right, + uint32_t dex_pc = kNoDexPc) + : HBinaryOperation(result_type, left, right, SideEffects::None(), dex_pc), + op_kind_(op) { + DCHECK(op == HInstruction::kAnd || op == HInstruction::kOr || op == HInstruction::kXor) << op; + } + + template <typename T, typename U> + auto Compute(T x, U y) const -> decltype(x & ~y) { + static_assert(std::is_same<decltype(x & ~y), decltype(x | ~y)>::value && + std::is_same<decltype(x & ~y), decltype(x ^ 
~y)>::value, + "Inconsistent negated bitwise types"); + switch (op_kind_) { + case HInstruction::kAnd: + return x & ~y; + case HInstruction::kOr: + return x | ~y; + case HInstruction::kXor: + return x ^ ~y; + default: + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); + } + } + + HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetIntConstant( + Compute(x->GetValue(), y->GetValue()), GetDexPc()); + } + HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE { + return GetBlock()->GetGraph()->GetLongConstant( + Compute(x->GetValue(), y->GetValue()), GetDexPc()); + } + HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED, + HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + LOG(FATAL) << DebugName() << " is not defined for float values"; + UNREACHABLE(); + } + HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED, + HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE { + LOG(FATAL) << DebugName() << " is not defined for double values"; + UNREACHABLE(); + } + + InstructionKind GetOpKind() const { return op_kind_; } + + DECLARE_INSTRUCTION(BitwiseNegatedRight); + + private: + // Specifies the bitwise operation, which will be then negated. 
+ const InstructionKind op_kind_; + + DISALLOW_COPY_AND_ASSIGN(HBitwiseNegatedRight); +}; + } // namespace art #endif // ART_COMPILER_OPTIMIZING_NODES_SHARED_H_ diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 13d6d620f8..7a82063bba 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -20,7 +20,7 @@ #include <memory> #include <stdint.h> -#ifdef ART_ENABLE_CODEGEN_arm64 +#ifdef ART_ENABLE_CODEGEN_arm #include "dex_cache_array_fixups_arm.h" #endif @@ -431,6 +431,7 @@ static void MaybeRunInliner(HGraph* graph, static void RunArchOptimizations(InstructionSet instruction_set, HGraph* graph, + CodeGenerator* codegen, OptimizingCompilerStats* stats, PassObserver* pass_observer) { ArenaAllocator* arena = graph->GetArena(); @@ -466,7 +467,8 @@ static void RunArchOptimizations(InstructionSet instruction_set, #endif #ifdef ART_ENABLE_CODEGEN_x86 case kX86: { - x86::PcRelativeFixups* pc_relative_fixups = new (arena) x86::PcRelativeFixups(graph, stats); + x86::PcRelativeFixups* pc_relative_fixups = + new (arena) x86::PcRelativeFixups(graph, codegen, stats); HOptimization* x86_optimizations[] = { pc_relative_fixups }; @@ -483,7 +485,11 @@ NO_INLINE // Avoid increasing caller's frame size by large stack-allocated obje static void AllocateRegisters(HGraph* graph, CodeGenerator* codegen, PassObserver* pass_observer) { - PrepareForRegisterAllocation(graph).Run(); + { + PassScope scope(PrepareForRegisterAllocation::kPrepareForRegisterAllocationPassName, + pass_observer); + PrepareForRegisterAllocation(graph).Run(); + } SsaLivenessAnalysis liveness(graph, codegen); { PassScope scope(SsaLivenessAnalysis::kLivenessPassName, pass_observer); @@ -557,7 +563,7 @@ static void RunOptimizations(HGraph* graph, }; RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer); - RunArchOptimizations(driver->GetInstructionSet(), graph, stats, pass_observer); + 
RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, stats, pass_observer); AllocateRegisters(graph, codegen, pass_observer); } @@ -909,34 +915,31 @@ bool OptimizingCompiler::JitCompile(Thread* self, return false; } - if (GetCompilerDriver()->GetCompilerOptions().GetGenerateDebugInfo()) { + const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions(); + if (compiler_options.GetGenerateDebugInfo()) { const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code); const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode()); - CompiledMethod compiled_method( - GetCompilerDriver(), - codegen->GetInstructionSet(), - ArrayRef<const uint8_t>(code_allocator.GetMemory()), - codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(), - codegen->GetCoreSpillMask(), - codegen->GetFpuSpillMask(), - ArrayRef<const SrcMapElem>(), - ArrayRef<const uint8_t>(), // mapping_table. - ArrayRef<const uint8_t>(stack_map_data, stack_map_size), - ArrayRef<const uint8_t>(), // native_gc_map. - ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()), - ArrayRef<const LinkerPatch>()); - debug::MethodDebugInfo method_debug_info { - dex_file, - class_def_idx, - method_idx, - access_flags, - code_item, - false, // deduped. 
- code_address, - code_address + code_allocator.GetSize(), - &compiled_method - }; - ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForMethod(method_debug_info); + debug::MethodDebugInfo info = debug::MethodDebugInfo(); + info.trampoline_name = nullptr; + info.dex_file = dex_file; + info.class_def_index = class_def_idx; + info.dex_method_index = method_idx; + info.access_flags = access_flags; + info.code_item = code_item; + info.isa = codegen->GetInstructionSet(); + info.deduped = false; + info.is_native_debuggable = compiler_options.GetNativeDebuggable(); + info.is_optimized = true; + info.is_code_address_text_relative = false; + info.code_address = code_address; + info.code_size = code_allocator.GetSize(); + info.frame_size_in_bytes = method_header->GetFrameSizeInBytes(); + info.code_info = stack_map_size == 0 ? nullptr : stack_map_data; + info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()); + ArrayRef<const uint8_t> elf_file = debug::WriteDebugElfFileForMethods( + GetCompilerDriver()->GetInstructionSet(), + GetCompilerDriver()->GetInstructionSetFeatures(), + ArrayRef<const debug::MethodDebugInfo>(&info, 1)); CreateJITCodeEntryForAddress(code_address, std::unique_ptr<const uint8_t[]>(elf_file.data()), elf_file.size()); diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc index a6f14616bf..d281a9fc6c 100644 --- a/compiler/optimizing/pc_relative_fixups_x86.cc +++ b/compiler/optimizing/pc_relative_fixups_x86.cc @@ -16,6 +16,7 @@ #include "pc_relative_fixups_x86.h" #include "code_generator_x86.h" +#include "intrinsics_x86.h" namespace art { namespace x86 { @@ -25,7 +26,10 @@ namespace x86 { */ class PCRelativeHandlerVisitor : public HGraphVisitor { public: - explicit PCRelativeHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {} + PCRelativeHandlerVisitor(HGraph* graph, CodeGenerator* codegen) + : HGraphVisitor(graph), + 
codegen_(down_cast<CodeGeneratorX86*>(codegen)), + base_(nullptr) {} void MoveBaseIfNeeded() { if (base_ != nullptr) { @@ -146,7 +150,6 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { if (base_ != nullptr) { return; } - // Insert the base at the start of the entry block, move it to a better // position later in MoveBaseIfNeeded(). base_ = new (GetGraph()->GetArena()) HX86ComputeBaseMethodAddress(); @@ -180,7 +183,9 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } bool base_added = false; - if (invoke_static_or_direct != nullptr && invoke_static_or_direct->HasPcRelativeDexCache()) { + if (invoke_static_or_direct != nullptr && + invoke_static_or_direct->HasPcRelativeDexCache() && + !WillHaveCallFreeIntrinsicsCodeGen(invoke)) { InitializePCRelativeBasePointer(); // Add the extra parameter base_. invoke_static_or_direct->AddSpecialInput(base_); @@ -215,6 +220,24 @@ class PCRelativeHandlerVisitor : public HGraphVisitor { } } + bool WillHaveCallFreeIntrinsicsCodeGen(HInvoke* invoke) { + if (invoke->GetIntrinsic() != Intrinsics::kNone) { + // This invoke may have intrinsic code generation defined. However, we must + // now also determine if this code generation is truly there and call-free + // (not unimplemented, no bail on instruction features, or call on slow path). + // This is done by actually calling the locations builder on the instruction + // and clearing out the locations once result is known. We assume this + // call only has creating locations as side effects! + IntrinsicLocationsBuilderX86 builder(codegen_); + bool success = builder.TryDispatch(invoke) && !invoke->GetLocations()->CanCall(); + invoke->SetLocations(nullptr); + return success; + } + return false; + } + + CodeGeneratorX86* codegen_; + // The generated HX86ComputeBaseMethodAddress in the entry block needed as an // input to the HX86LoadFromConstantTable instructions. 
HX86ComputeBaseMethodAddress* base_; @@ -226,7 +249,7 @@ void PcRelativeFixups::Run() { // that can be live-in at the irreducible loop header. return; } - PCRelativeHandlerVisitor visitor(graph_); + PCRelativeHandlerVisitor visitor(graph_, codegen_); visitor.VisitInsertionOrder(); visitor.MoveBaseIfNeeded(); } diff --git a/compiler/optimizing/pc_relative_fixups_x86.h b/compiler/optimizing/pc_relative_fixups_x86.h index af708acb03..03de2fcece 100644 --- a/compiler/optimizing/pc_relative_fixups_x86.h +++ b/compiler/optimizing/pc_relative_fixups_x86.h @@ -21,14 +21,21 @@ #include "optimization.h" namespace art { + +class CodeGenerator; + namespace x86 { class PcRelativeFixups : public HOptimization { public: - PcRelativeFixups(HGraph* graph, OptimizingCompilerStats* stats) - : HOptimization(graph, "pc_relative_fixups_x86", stats) {} + PcRelativeFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats) + : HOptimization(graph, "pc_relative_fixups_x86", stats), + codegen_(codegen) {} void Run() OVERRIDE; + + private: + CodeGenerator* codegen_; }; } // namespace x86 diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h index c8b8b0dcfa..c90724c251 100644 --- a/compiler/optimizing/prepare_for_register_allocation.h +++ b/compiler/optimizing/prepare_for_register_allocation.h @@ -32,6 +32,9 @@ class PrepareForRegisterAllocation : public HGraphDelegateVisitor { void Run(); + static constexpr const char* kPrepareForRegisterAllocationPassName = + "prepare_for_register_allocation"; + private: void VisitNullCheck(HNullCheck* check) OVERRIDE; void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE; diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index deaa415ed4..75356c848b 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -432,11 +432,10 @@ void 
ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst } else if (klass != nullptr) { ScopedObjectAccess soa(Thread::Current()); ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass); - is_exact = is_exact || klass->CannotBeAssignedFromOtherTypes(); + is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes(); instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact)); } else { - instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache_->GetObjectClassHandle(), /* is_exact */ false)); + instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); } } @@ -518,8 +517,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitUnresolvedInstanceFieldGet( HUnresolvedInstanceFieldGet* instr) { // TODO: Use descriptor to get the actual type. if (instr->GetFieldType() == Primitive::kPrimNot) { - instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache_->GetObjectClassHandle(), /* is_exact */ false)); + instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); } } @@ -527,8 +525,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitUnresolvedStaticFieldGet( HUnresolvedStaticFieldGet* instr) { // TODO: Use descriptor to get the actual type. 
if (instr->GetFieldType() == Primitive::kPrimNot) { - instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache_->GetObjectClassHandle(), /* is_exact */ false)); + instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); } } @@ -724,12 +721,11 @@ void ReferenceTypePropagation::UpdateArrayGet(HArrayGet* instr, HandleCache* han if (handle->IsObjectArrayClass()) { ReferenceTypeInfo::TypeHandle component_handle = handle_cache->NewHandle(handle->GetComponentType()); - instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(component_handle, /* is_exact */ false)); + bool is_exact = component_handle->CannotBeAssignedFromOtherTypes(); + instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(component_handle, is_exact)); } else { // We don't know what the parent actually is, so we fallback to object. - instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache->GetObjectClassHandle(), /* is_exact */ false)); + instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); } } @@ -811,8 +807,7 @@ void ReferenceTypePropagation::UpdatePhi(HPhi* instr) { if (first_input_index_not_null == input_count) { // All inputs are NullConstants, set the type to object. // This may happen in the presence of inlining. 
- instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache_.GetObjectClassHandle(), /* is_exact */ false)); + instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); return; } diff --git a/compiler/utils/swap_space.cc b/compiler/utils/swap_space.cc index 244a5fedbe..1a8f567aa1 100644 --- a/compiler/utils/swap_space.cc +++ b/compiler/utils/swap_space.cc @@ -152,7 +152,6 @@ SwapSpace::SpaceChunk SwapSpace::NewFileChunk(size_t min_size) { } size_ += next_part; SpaceChunk new_chunk = {ptr, next_part}; - maps_.push_back(new_chunk); return new_chunk; #else UNUSED(min_size, kMininumMapSize); diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h index b659f1d3c7..bf06675d72 100644 --- a/compiler/utils/swap_space.h +++ b/compiler/utils/swap_space.h @@ -85,7 +85,6 @@ class SwapSpace { int fd_; size_t size_; - std::list<SpaceChunk> maps_; // NOTE: Boost.Bimap would be useful for the two following members. diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index cbce0f6e91..ce4f38a9e3 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -32,9 +32,6 @@ #include <sys/utsname.h> #endif -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include <cutils/trace.h> - #include "arch/instruction_set_features.h" #include "arch/mips/instruction_set_features_mips.h" #include "art_method-inl.h" @@ -49,6 +46,7 @@ #include "class_linker.h" #include "compiler.h" #include "compiler_callbacks.h" +#include "debug/elf_debug_writer.h" #include "debug/method_debug_info.h" #include "dex/pass_manager.h" #include "dex/quick/dex_file_to_method_inliner_map.h" @@ -698,6 +696,12 @@ class Dex2Oat FINAL { Usage("Can't have both --image and (--app-image-fd or --app-image-file)"); } + if (IsBootImage()) { + // We need the boot image to always be debuggable. + // TODO: Remove this once we better deal with full frame deoptimization. 
+ compiler_options_->debuggable_ = true; + } + if (oat_filenames_.empty() && oat_fd_ == -1) { Usage("Output must be supplied with either --oat-file or --oat-fd"); } @@ -1031,6 +1035,9 @@ class Dex2Oat FINAL { key_value_store_->Put( OatHeader::kDebuggableKey, compiler_options_->debuggable_ ? OatHeader::kTrueValue : OatHeader::kFalseValue); + key_value_store_->Put( + OatHeader::kNativeDebuggableKey, + compiler_options_->native_debuggable_ ? OatHeader::kTrueValue : OatHeader::kFalseValue); if (compiler_options_->IsExtractOnly()) { key_value_store_->Put(OatHeader::kCompilationType, OatHeader::kExtractOnlyValue); } else if (UseProfileGuidedCompilation()) { @@ -1684,6 +1691,8 @@ class Dex2Oat FINAL { std::unique_ptr<ElfWriter>& elf_writer = elf_writers_[i]; std::unique_ptr<OatWriter>& oat_writer = oat_writers_[i]; + oat_writer->AddMethodDebugInfos(debug::MakeTrampolineInfos(oat_writer->GetOatHeader())); + // We need to mirror the layout of the ELF file in the compressed debug-info. // Therefore PrepareDebugInfo() relies on the SetLoadedSectionSizes() call further above. elf_writer->PrepareDebugInfo(oat_writer->GetMethodDebugInfo()); @@ -2097,8 +2106,10 @@ class Dex2Oat FINAL { elf_writers_.reserve(oat_files_.size()); oat_writers_.reserve(oat_files_.size()); for (const std::unique_ptr<File>& oat_file : oat_files_) { - elf_writers_.emplace_back( - CreateElfWriterQuick(instruction_set_, compiler_options_.get(), oat_file.get())); + elf_writers_.emplace_back(CreateElfWriterQuick(instruction_set_, + instruction_set_features_.get(), + compiler_options_.get(), + oat_file.get())); elf_writers_.back()->Start(); oat_writers_.emplace_back(new OatWriter(IsBootImage(), timings_)); } diff --git a/disassembler/Android.mk b/disassembler/Android.mk index 039986ce2b..bf563c7660 100644 --- a/disassembler/Android.mk +++ b/disassembler/Android.mk @@ -89,7 +89,7 @@ define build-libart-disassembler LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE) # For disassembler_arm64. 
ifeq ($$(art_ndebug_or_debug),debug) - LOCAL_SHARED_LIBRARIES += libvixld + LOCAL_SHARED_LIBRARIES += libvixl else LOCAL_SHARED_LIBRARIES += libvixl endif diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index e30b968a96..3ed57664bc 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -32,6 +32,8 @@ #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "class_linker-inl.h" +#include "debug/elf_debug_writer.h" +#include "debug/method_debug_info.h" #include "dex_file-inl.h" #include "dex_instruction.h" #include "disassembler.h" @@ -98,6 +100,7 @@ const DexFile* OpenDexFile(const OatFile::OatDexFile* oat_dex_file, std::string* return ret; } +template <typename ElfTypes> class OatSymbolizer FINAL { public: OatSymbolizer(const OatFile* oat_file, const std::string& output_name) : @@ -105,29 +108,21 @@ class OatSymbolizer FINAL { output_name_(output_name.empty() ? "symbolized.oat" : output_name) { } - typedef void (OatSymbolizer::*Callback)(const DexFile::ClassDef&, - uint32_t, - const OatFile::OatMethod&, - const DexFile&, - uint32_t, - const DexFile::CodeItem*, - uint32_t); - bool Symbolize() { const InstructionSet isa = oat_file_->GetOatHeader().GetInstructionSet(); + const InstructionSetFeatures* features = InstructionSetFeatures::FromBitmap( + isa, oat_file_->GetOatHeader().GetInstructionSetFeaturesBitmap()); File* elf_file = OS::CreateEmptyFile(output_name_.c_str()); std::unique_ptr<BufferedOutputStream> output_stream( MakeUnique<BufferedOutputStream>(MakeUnique<FileOutputStream>(elf_file))); - builder_.reset(new ElfBuilder<ElfTypes32>(isa, output_stream.get())); + builder_.reset(new ElfBuilder<ElfTypes>(isa, features, output_stream.get())); builder_->Start(); auto* rodata = builder_->GetRoData(); auto* text = builder_->GetText(); auto* bss = builder_->GetBss(); - auto* strtab = builder_->GetStrTab(); - auto* symtab = builder_->GetSymTab(); rodata->Start(); const uint8_t* rodata_begin = oat_file_->Begin(); @@ -145,68 +140,38 @@ class 
OatSymbolizer FINAL { bss->WriteNoBitsSection(oat_file_->BssSize()); } + if (isa == kMips || isa == kMips64) { + builder_->WriteMIPSabiflagsSection(); + } builder_->PrepareDynamicSection( elf_file->GetPath(), rodata_size, text_size, oat_file_->BssSize()); builder_->WriteDynamicSection(); - Walk(&art::OatSymbolizer::RegisterForDedup); - - NormalizeState(); - - strtab->Start(); - strtab->Write(""); // strtab should start with empty string. - AddTrampolineSymbols(); - Walk(&art::OatSymbolizer::AddSymbol); - strtab->End(); + Walk(); + for (const auto& trampoline : debug::MakeTrampolineInfos(oat_file_->GetOatHeader())) { + method_debug_infos_.push_back(trampoline); + } - symtab->Start(); - symtab->Write(); - symtab->End(); + debug::WriteDebugInfo(builder_.get(), + ArrayRef<const debug::MethodDebugInfo>(method_debug_infos_), + dwarf::DW_DEBUG_FRAME_FORMAT, + true /* write_oat_patches */); builder_->End(); return builder_->Good(); } - void AddTrampolineSymbol(const char* name, uint32_t code_offset) { - if (code_offset != 0) { - uint32_t name_offset = builder_->GetStrTab()->Write(name); - uint64_t symbol_value = code_offset - oat_file_->GetOatHeader().GetExecutableOffset(); - // Specifying 0 as the symbol size means that the symbol lasts until the next symbol or until - // the end of the section in case of the last symbol. 
- builder_->GetSymTab()->Add(name_offset, builder_->GetText(), symbol_value, - /* is_relative */ true, /* size */ 0, STB_GLOBAL, STT_FUNC); - } - } - - void AddTrampolineSymbols() { - const OatHeader& oat_header = oat_file_->GetOatHeader(); - AddTrampolineSymbol("interpreterToInterpreterBridge", - oat_header.GetInterpreterToInterpreterBridgeOffset()); - AddTrampolineSymbol("interpreterToCompiledCodeBridge", - oat_header.GetInterpreterToCompiledCodeBridgeOffset()); - AddTrampolineSymbol("jniDlsymLookup", - oat_header.GetJniDlsymLookupOffset()); - AddTrampolineSymbol("quickGenericJniTrampoline", - oat_header.GetQuickGenericJniTrampolineOffset()); - AddTrampolineSymbol("quickImtConflictTrampoline", - oat_header.GetQuickImtConflictTrampolineOffset()); - AddTrampolineSymbol("quickResolutionTrampoline", - oat_header.GetQuickResolutionTrampolineOffset()); - AddTrampolineSymbol("quickToInterpreterBridge", - oat_header.GetQuickToInterpreterBridgeOffset()); - } - - void Walk(Callback callback) { + void Walk() { std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file_->GetOatDexFiles(); for (size_t i = 0; i < oat_dex_files.size(); i++) { const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i]; CHECK(oat_dex_file != nullptr); - WalkOatDexFile(oat_dex_file, callback); + WalkOatDexFile(oat_dex_file); } } - void WalkOatDexFile(const OatFile::OatDexFile* oat_dex_file, Callback callback) { + void WalkOatDexFile(const OatFile::OatDexFile* oat_dex_file) { std::string error_msg; const DexFile* const dex_file = OpenDexFile(oat_dex_file, &error_msg); if (dex_file == nullptr) { @@ -215,13 +180,12 @@ class OatSymbolizer FINAL { for (size_t class_def_index = 0; class_def_index < dex_file->NumClassDefs(); class_def_index++) { - const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index); const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index); OatClassType type = oat_class.GetType(); switch (type) { case kOatClassAllCompiled: case 
kOatClassSomeCompiled: - WalkOatClass(oat_class, *dex_file, class_def, callback); + WalkOatClass(oat_class, *dex_file, class_def_index); break; case kOatClassNoneCompiled: @@ -232,8 +196,10 @@ class OatSymbolizer FINAL { } } - void WalkOatClass(const OatFile::OatClass& oat_class, const DexFile& dex_file, - const DexFile::ClassDef& class_def, Callback callback) { + void WalkOatClass(const OatFile::OatClass& oat_class, + const DexFile& dex_file, + uint32_t class_def_index) { + const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index); const uint8_t* class_data = dex_file.GetClassData(class_def); if (class_data == nullptr) { // empty class such as a marker interface? return; @@ -241,111 +207,62 @@ class OatSymbolizer FINAL { // Note: even if this is an interface or a native class, we still have to walk it, as there // might be a static initializer. ClassDataItemIterator it(dex_file, class_data); - SkipAllFields(&it); uint32_t class_method_idx = 0; - while (it.HasNextDirectMethod()) { - const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_idx); - WalkOatMethod(class_def, class_method_idx, oat_method, dex_file, it.GetMemberIndex(), - it.GetMethodCodeItem(), it.GetMethodAccessFlags(), callback); - class_method_idx++; - it.Next(); - } - while (it.HasNextVirtualMethod()) { - const OatFile::OatMethod oat_method = oat_class.GetOatMethod(class_method_idx); - WalkOatMethod(class_def, class_method_idx, oat_method, dex_file, it.GetMemberIndex(), - it.GetMethodCodeItem(), it.GetMethodAccessFlags(), callback); - class_method_idx++; - it.Next(); + for (; it.HasNextStaticField(); it.Next()) { /* skip */ } + for (; it.HasNextInstanceField(); it.Next()) { /* skip */ } + for (; it.HasNextDirectMethod() || it.HasNextVirtualMethod(); it.Next()) { + WalkOatMethod(oat_class.GetOatMethod(class_method_idx++), + dex_file, + class_def_index, + it.GetMemberIndex(), + it.GetMethodCodeItem(), + it.GetMethodAccessFlags()); } DCHECK(!it.HasNext()); } - void 
WalkOatMethod(const DexFile::ClassDef& class_def, uint32_t class_method_index, - const OatFile::OatMethod& oat_method, const DexFile& dex_file, - uint32_t dex_method_idx, const DexFile::CodeItem* code_item, - uint32_t method_access_flags, Callback callback) { + void WalkOatMethod(const OatFile::OatMethod& oat_method, + const DexFile& dex_file, + uint32_t class_def_index, + uint32_t dex_method_index, + const DexFile::CodeItem* code_item, + uint32_t method_access_flags) { if ((method_access_flags & kAccAbstract) != 0) { // Abstract method, no code. return; } - if (oat_method.GetCodeOffset() == 0) { + const OatHeader& oat_header = oat_file_->GetOatHeader(); + const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader(); + if (method_header == nullptr || method_header->GetCodeSize() == 0) { // No code. return; } - (this->*callback)(class_def, class_method_index, oat_method, dex_file, dex_method_idx, code_item, - method_access_flags); - } - - void RegisterForDedup(const DexFile::ClassDef& class_def ATTRIBUTE_UNUSED, - uint32_t class_method_index ATTRIBUTE_UNUSED, - const OatFile::OatMethod& oat_method, - const DexFile& dex_file ATTRIBUTE_UNUSED, - uint32_t dex_method_idx ATTRIBUTE_UNUSED, - const DexFile::CodeItem* code_item ATTRIBUTE_UNUSED, - uint32_t method_access_flags ATTRIBUTE_UNUSED) { - state_[oat_method.GetCodeOffset()]++; - } - - void NormalizeState() { - for (auto& x : state_) { - if (x.second == 1) { - state_[x.first] = 0; - } - } - } - - enum class DedupState { // private - kNotDeduplicated, - kDeduplicatedFirst, - kDeduplicatedOther - }; - DedupState IsDuplicated(uint32_t offset) { - if (state_[offset] == 0) { - return DedupState::kNotDeduplicated; - } - if (state_[offset] == 1) { - return DedupState::kDeduplicatedOther; - } - state_[offset] = 1; - return DedupState::kDeduplicatedFirst; - } - - void AddSymbol(const DexFile::ClassDef& class_def ATTRIBUTE_UNUSED, - uint32_t class_method_index ATTRIBUTE_UNUSED, - const OatFile::OatMethod& 
oat_method, - const DexFile& dex_file, - uint32_t dex_method_idx, - const DexFile::CodeItem* code_item ATTRIBUTE_UNUSED, - uint32_t method_access_flags ATTRIBUTE_UNUSED) { - DedupState dedup = IsDuplicated(oat_method.GetCodeOffset()); - if (dedup != DedupState::kDeduplicatedOther) { - std::string pretty_name = PrettyMethod(dex_method_idx, dex_file, true); - - if (dedup == DedupState::kDeduplicatedFirst) { - pretty_name = "[Dedup]" + pretty_name; - } - - int name_offset = builder_->GetStrTab()->Write(pretty_name); - builder_->GetSymTab()->Add(name_offset, builder_->GetText(), - oat_method.GetCodeOffset() - oat_file_->GetOatHeader().GetExecutableOffset(), - true, oat_method.GetQuickCodeSize(), STB_GLOBAL, STT_FUNC); - } + debug::MethodDebugInfo info = debug::MethodDebugInfo(); + info.trampoline_name = nullptr; + info.dex_file = &dex_file; + info.class_def_index = class_def_index; + info.dex_method_index = dex_method_index; + info.access_flags = method_access_flags; + info.code_item = code_item; + info.isa = oat_header.GetInstructionSet(); + info.deduped = !seen_offsets_.insert(oat_method.GetCodeOffset()).second; + info.is_native_debuggable = oat_header.IsNativeDebuggable(); + info.is_optimized = method_header->IsOptimized(); + info.is_code_address_text_relative = true; + info.code_address = oat_method.GetCodeOffset() - oat_header.GetExecutableOffset(); + info.code_size = method_header->GetCodeSize(); + info.frame_size_in_bytes = method_header->GetFrameSizeInBytes(); + info.code_info = info.is_optimized ? 
method_header->GetOptimizedCodeInfoPtr() : nullptr; + info.cfi = ArrayRef<uint8_t>(); + method_debug_infos_.push_back(info); } private: - static void SkipAllFields(ClassDataItemIterator* it) { - while (it->HasNextStaticField()) { - it->Next(); - } - while (it->HasNextInstanceField()) { - it->Next(); - } - } - const OatFile* oat_file_; - std::unique_ptr<ElfBuilder<ElfTypes32> > builder_; - std::unordered_map<uint32_t, uint32_t> state_; + std::unique_ptr<ElfBuilder<ElfTypes> > builder_; + std::vector<debug::MethodDebugInfo> method_debug_infos_; + std::unordered_set<uint32_t> seen_offsets_; const std::string output_name_; }; @@ -361,6 +278,7 @@ class OatDumperOptions { const char* method_filter, bool list_classes, bool list_methods, + bool dump_header_only, const char* export_dex_location, uint32_t addr2instr) : dump_raw_mapping_table_(dump_raw_mapping_table), @@ -373,6 +291,7 @@ class OatDumperOptions { method_filter_(method_filter), list_classes_(list_classes), list_methods_(list_methods), + dump_header_only_(dump_header_only), export_dex_location_(export_dex_location), addr2instr_(addr2instr), class_loader_(nullptr) {} @@ -387,6 +306,7 @@ class OatDumperOptions { const char* const method_filter_; const bool list_classes_; const bool list_methods_; + const bool dump_header_only_; const char* const export_dex_location_; uint32_t addr2instr_; Handle<mirror::ClassLoader>* class_loader_; @@ -514,21 +434,24 @@ class OatDumper { os << StringPrintf("0x%08x\n\n", resolved_addr2instr_); } - for (size_t i = 0; i < oat_dex_files_.size(); i++) { - const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i]; - CHECK(oat_dex_file != nullptr); + if (!options_.dump_header_only_) { + for (size_t i = 0; i < oat_dex_files_.size(); i++) { + const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i]; + CHECK(oat_dex_file != nullptr); - // If file export selected skip file analysis - if (options_.export_dex_location_) { - if (!ExportDexFile(os, *oat_dex_file)) { - success = false; - } - 
} else { - if (!DumpOatDexFile(os, *oat_dex_file)) { - success = false; + // If file export selected skip file analysis + if (options_.export_dex_location_) { + if (!ExportDexFile(os, *oat_dex_file)) { + success = false; + } + } else { + if (!DumpOatDexFile(os, *oat_dex_file)) { + success = false; + } } } } + os << std::flush; return success; } @@ -2536,8 +2459,17 @@ static int SymbolizeOat(const char* oat_filename, std::string& output_name) { return EXIT_FAILURE; } - OatSymbolizer oat_symbolizer(oat_file, output_name); - if (!oat_symbolizer.Symbolize()) { + bool result; + // Try to produce an ELF file of the same type. This is finicky, as we have used 32-bit ELF + // files for 64-bit code in the past. + if (Is64BitInstructionSet(oat_file->GetOatHeader().GetInstructionSet())) { + OatSymbolizer<ElfTypes64> oat_symbolizer(oat_file, output_name); + result = oat_symbolizer.Symbolize(); + } else { + OatSymbolizer<ElfTypes32> oat_symbolizer(oat_file, output_name); + result = oat_symbolizer.Symbolize(); + } + if (!result) { fprintf(stderr, "Failed to symbolize\n"); return EXIT_FAILURE; } @@ -2572,6 +2504,8 @@ struct OatdumpArgs : public CmdlineArgs { dump_code_info_stack_maps_ = true; } else if (option == "--no-disassemble") { disassemble_code_ = false; + } else if (option =="--header-only") { + dump_header_only_ = true; } else if (option.starts_with("--symbolize=")) { oat_filename_ = option.substr(strlen("--symbolize=")).data(); symbolize_ = true; @@ -2655,6 +2589,9 @@ struct OatdumpArgs : public CmdlineArgs { " --no-disassemble may be used to disable disassembly.\n" " Example: --no-disassemble\n" "\n" + " --header-only may be used to print only the oat header.\n" + " Example: --header-only\n" + "\n" " --list-classes may be used to list target file classes (can be used with filters).\n" " Example: --list-classes\n" " Example: --list-classes --class-filter=com.example.foo\n" @@ -2697,6 +2634,7 @@ struct OatdumpArgs : public CmdlineArgs { bool symbolize_ = false; bool 
list_classes_ = false; bool list_methods_ = false; + bool dump_header_only_ = false; uint32_t addr2instr_ = 0; const char* export_dex_location_ = nullptr; }; @@ -2719,6 +2657,7 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> { args_->method_filter_, args_->list_classes_, args_->list_methods_, + args_->dump_header_only_, args_->export_dex_location_, args_->addr2instr_)); diff --git a/runtime/Android.mk b/runtime/Android.mk index 88118501a0..84660a3195 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -318,7 +318,8 @@ LIBART_TARGET_SRC_FILES_mips := \ arch/mips/fault_handler_mips.cc LIBART_TARGET_SRC_FILES_mips64 := \ - interpreter/mterp/mterp_stub.cc \ + interpreter/mterp/mterp.cc \ + interpreter/mterp/out/mterp_mips64.S \ arch/mips64/context_mips64.cc \ arch/mips64/entrypoints_init_mips64.cc \ arch/mips64/jni_entrypoints_mips64.S \ @@ -500,7 +501,7 @@ endif ifeq ($$(art_target_or_host),target) $$(eval $$(call set-target-local-clang-vars)) $$(eval $$(call set-target-local-cflags-vars,$(2))) - LOCAL_CLANG_arm64 := true + LOCAL_CLANG_ASFLAGS_arm += -no-integrated-as LOCAL_CFLAGS_$(DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)" LOCAL_CFLAGS_$(2ND_DEX2OAT_TARGET_ARCH) += -DART_DEFAULT_INSTRUCTION_SET_FEATURES="$(2ND_LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES)" else # host diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index cfcef49084..64135d8f77 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -276,7 +276,7 @@ ENTRY \name bl \entrypoint @ (field_idx, Object*, new_val, referrer, Thread*) add sp, #16 @ release out args .cfi_adjust_cfa_offset -16 - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME @ TODO: we can clearly save an add here \return END \name .endm @@ -812,14 +812,23 @@ END \name .macro FOUR_ARG_DOWNCALL 
name, entrypoint, return .extern \entrypoint ENTRY \name + sub sp, #12 @ alignment padding + .cfi_adjust_cfa_offset 12 + push {r3} @ Save r3 as is it used as a temp register in the + .cfi_adjust_cfa_offset 4 @ expansion of the SETUP_REFS_ONLY_CALLEE_SAVE_FRAME + .cfi_rel_offset r3, 0 @ macro below, which clobbers its arguments. SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC + ldr r3, [sp, 32] @ restore r3 + .cfi_restore r3 + str r9, [sp, #-16]! @ expand the frame and pass Thread::Current - .pad #16 .cfi_adjust_cfa_offset 16 bl \entrypoint add sp, #16 @ strip the extra frame .cfi_adjust_cfa_offset -16 RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + add sp, #16 @ pop r3 + padding + .cfi_adjust_cfa_offset -16 \return END \name .endm @@ -943,85 +952,6 @@ ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_R // Generate the allocation entrypoints for each allocator. GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR -// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). -ENTRY art_quick_alloc_object_tlab - // Fast path tlab allocation. - // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current - // r2, r3, r12: free. -#if defined(USE_READ_BARRIER) - eor r0, r0, r0 // Read barrier not supported here. - sub r0, r0, #1 // Return -1. - bx lr -#endif - ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array - // Load the class (r2) - ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] - cbz r2, .Lart_quick_alloc_object_tlab_slow_path // Check null class - // Check class status. - ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET] - cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED - bne .Lart_quick_alloc_object_tlab_slow_path - // Add a fake dependence from the - // following access flag and size - // loads to the status load. 
- // This is to prevent those loads - // from being reordered above the - // status load and reading wrong - // values (an alternative is to use - // a load-acquire for the status). - eor r3, r3, r3 - add r2, r2, r3 - // Check access flags has - // kAccClassIsFinalizable. - ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET] - tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE - bne .Lart_quick_alloc_object_tlab_slow_path - // Load thread_local_pos (r12) and - // thread_local_end (r3) with ldrd. - // Check constraints for ldrd. -#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0)) -#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance" -#endif - ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET] - sub r12, r3, r12 // Compute the remaining buf size. - ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3). - cmp r3, r12 // Check if it fits. OK to do this - // before rounding up the object size - // assuming the buf size alignment. - bhi .Lart_quick_alloc_object_tlab_slow_path - // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1. - // Round up the object size by the - // object alignment. (addr + 7) & ~7. - add r3, r3, #OBJECT_ALIGNMENT_MASK - and r3, r3, #OBJECT_ALIGNMENT_MASK_TOGGLED - // Reload old thread_local_pos (r0) - // for the return value. - ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET] - add r1, r0, r3 - str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. - ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects. - add r1, r1, #1 - str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] - POISON_HEAP_REF r2 - str r2, [r0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. - // Fence. This is "ish" not "ishst" so - // that the code after this allocation - // site will see the right values in - // the fields of the class. 
- // Alternatively we could use "ishst" - // if we use load-acquire for the - // class status load.) - dmb ish - bx lr -.Lart_quick_alloc_object_tlab_slow_path: - SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC. - mov r2, r9 // Pass Thread::Current. - bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*) - RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME - RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER -END art_quick_alloc_object_tlab - - // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). ENTRY art_quick_alloc_object_rosalloc // Fast path rosalloc allocation. @@ -1125,6 +1055,127 @@ ENTRY art_quick_alloc_object_rosalloc RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER END art_quick_alloc_object_rosalloc +// The common fast path code for art_quick_alloc_object_tlab and art_quick_alloc_object_region_tlab. +// +// r0: type_idx/return value, r1: ArtMethod*, r2: class, r9: Thread::Current, r3, r12: free. +// Need to preserve r0 and r1 to the slow path. +.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel + cbz r2, \slowPathLabel // Check null class + // Check class status. + ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET] + cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED + bne \slowPathLabel + // Add a fake dependence from the + // following access flag and size + // loads to the status load. + // This is to prevent those loads + // from being reordered above the + // status load and reading wrong + // values (an alternative is to use + // a load-acquire for the status). + eor r3, r3, r3 + add r2, r2, r3 + // Check access flags has + // kAccClassIsFinalizable. + ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET] + tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE + bne \slowPathLabel + // Load thread_local_pos (r12) and + // thread_local_end (r3) with ldrd. + // Check constraints for ldrd. 
+#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0)) +#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance" +#endif + ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET] + sub r12, r3, r12 // Compute the remaining buf size. + ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3). + cmp r3, r12 // Check if it fits. OK to do this + // before rounding up the object size + // assuming the buf size alignment. + bhi \slowPathLabel + // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1. + // Round up the object size by the + // object alignment. (addr + 7) & ~7. + add r3, r3, #OBJECT_ALIGNMENT_MASK + and r3, r3, #OBJECT_ALIGNMENT_MASK_TOGGLED + // Reload old thread_local_pos (r0) + // for the return value. + ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET] + add r1, r0, r3 + str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos. + ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects. + add r1, r1, #1 + str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] + POISON_HEAP_REF r2 + str r2, [r0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer. + // Fence. This is "ish" not "ishst" so + // that the code after this allocation + // site will see the right values in + // the fields of the class. + // Alternatively we could use "ishst" + // if we use load-acquire for the + // class status load.) + dmb ish + bx lr +.endm + +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB). +ENTRY art_quick_alloc_object_tlab + // Fast path tlab allocation. + // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current + // r2, r3, r12: free. +#if defined(USE_READ_BARRIER) + eor r0, r0, r0 // Read barrier not supported here. + sub r0, r0, #1 // Return -1. 
+ bx lr +#endif + ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array + // Load the class (r2) + ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] + ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path +.Lart_quick_alloc_object_tlab_slow_path: + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC. + mov r2, r9 // Pass Thread::Current. + bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER +END art_quick_alloc_object_tlab + +// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) +ENTRY art_quick_alloc_object_region_tlab + // Fast path tlab allocation. + // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current, r2, r3, r12: free. +#if !defined(USE_READ_BARRIER) + eor r0, r0, r0 // Read barrier must be enabled here. + sub r0, r0, #1 // Return -1. + bx lr +#endif + ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array + // Load the class (r2) + ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT] + // Read barrier for class load. + ldr r3, [r9, #THREAD_IS_GC_MARKING_OFFSET] + cbnz r3, .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path +.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit: + ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_region_tlab_slow_path +.Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path: + // The read barrier slow path. Mark + // the class. + push {r0, r1, r3, lr} // Save registers. r3 is pushed only + // to align sp by 16 bytes. + mov r0, r2 // Pass the class as the first param. + bl artReadBarrierMark + mov r2, r0 // Get the (marked) class back. 
+ pop {r0, r1, r3, lr} + b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit +.Lart_quick_alloc_object_region_tlab_slow_path: + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC. + mov r2, r9 // Pass Thread::Current. + bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER +END art_quick_alloc_object_region_tlab + /* * Called by managed code when the value in rSUSPEND has been decremented to 0. */ diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index e8480087a7..e4c255809b 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -1537,7 +1537,7 @@ ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_R // Generate the allocation entrypoints for each allocator. GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) + // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). ENTRY art_quick_alloc_object_rosalloc // Fast path rosalloc allocation. @@ -1638,6 +1638,9 @@ ENTRY art_quick_alloc_object_rosalloc RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER END art_quick_alloc_object_rosalloc +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) + /* * Called by managed code when the thread has been asked to suspend. 
*/ diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S index 51e224cbf3..801f708ad3 100644 --- a/runtime/arch/mips/asm_support_mips.S +++ b/runtime/arch/mips/asm_support_mips.S @@ -129,4 +129,43 @@ #endif // USE_HEAP_POISONING .endm +// Based on contents of creg select the minimum integer +// At the end of the macro the original value of creg is lost +.macro MINint dreg,rreg,sreg,creg + .set push + .set noat +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + .ifc \dreg, \rreg + selnez \dreg, \rreg, \creg + seleqz \creg, \sreg, \creg + .else + seleqz \dreg, \sreg, \creg + selnez \creg, \rreg, \creg + .endif + or \dreg, \dreg, \creg +#else + movn \dreg, \rreg, \creg + movz \dreg, \sreg, \creg +#endif + .set pop +.endm + +// Find minimum of two signed registers +.macro MINs dreg,rreg,sreg + .set push + .set noat + slt $at, \rreg, \sreg + MINint \dreg, \rreg, \sreg, $at + .set pop +.endm + +// Find minimum of two unsigned registers +.macro MINu dreg,rreg,sreg + .set push + .set noat + sltu $at, \rreg, \sreg + MINint \dreg, \rreg, \sreg, $at + .set pop +.endm + #endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 699ab3e65a..dbf0abb558 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -1313,7 +1313,7 @@ END \name // Generate the allocation entrypoints for each allocator. GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) + // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). ENTRY art_quick_alloc_object_rosalloc @@ -1416,11 +1416,14 @@ ENTRY art_quick_alloc_object_rosalloc SETUP_REFS_ONLY_CALLEE_SAVE_FRAME jal artAllocObjectFromCodeRosAlloc - move $a2 ,$s1 # Pass self as argument. + move $a2, $s1 # Pass self as argument. 
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER END art_quick_alloc_object_rosalloc +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) + /* * Entry from managed code to resolve a string, this stub will allocate a String and deliver an * exception on error. On success the String is returned. A0 holds the string index. The fast @@ -1744,5 +1747,74 @@ ENTRY_NO_GP art_quick_ushr_long nop END art_quick_ushr_long -UNIMPLEMENTED art_quick_indexof -UNIMPLEMENTED art_quick_string_compareto +/* java.lang.String.indexOf(int ch, int fromIndex=0) */ +ENTRY_NO_GP art_quick_indexof +/* $a0 holds address of "this" */ +/* $a1 holds "ch" */ +/* $a2 holds "fromIndex" */ + lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length() + slt $at, $a2, $zero # if fromIndex < 0 +#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6) + seleqz $a2, $a2, $at # fromIndex = 0; +#else + movn $a2, $zero, $at # fromIndex = 0; +#endif + subu $t0, $t0, $a2 # this.length() - fromIndex + blez $t0, 6f # if this.length()-fromIndex <= 0 + li $v0, -1 # return -1; + + sll $v0, $a2, 1 # $a0 += $a2 * 2 + addu $a0, $a0, $v0 # " " " " " + move $v0, $a2 # Set i to fromIndex. 
+ +1: + lhu $t3, MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch + beq $t3, $a1, 6f # return i; + addu $a0, $a0, 2 # i++ + subu $t0, $t0, 1 # this.length() - i + bnez $t0, 1b # while this.length() - i > 0 + addu $v0, $v0, 1 # i++ + + li $v0, -1 # if this.length() - i <= 0 + # return -1; + +6: + j $ra + nop +END art_quick_indexof + + .set push + .set noat +/* java.lang.String.compareTo(String anotherString) */ +ENTRY_NO_GP art_quick_string_compareto +/* $a0 holds address of "this" */ +/* $a1 holds address of "anotherString" */ + beq $a0, $a1, 9f # this and anotherString are the same object + move $v0, $zero + + lw $a2, MIRROR_STRING_COUNT_OFFSET($a0) # this.length() + lw $a3, MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length() + MINu $t2, $a2, $a3 +# $t2 now holds min(this.length(),anotherString.length()) + + beqz $t2, 9f # while min(this.length(),anotherString.length())-i != 0 + subu $v0, $a2, $a3 # if $t2==0 return + # (this.length() - anotherString.length()) +1: + lhu $t0, MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i) + lhu $t1, MIRROR_STRING_VALUE_OFFSET($a1) + bne $t0, $t1, 9f # if this.charAt(i) != anotherString.charAt(i) + subu $v0, $t0, $t1 # return (this.charAt(i) - anotherString.charAt(i)) + addiu $a0, $a0, 2 # point at this.charAt(i++) + subu $t2, $t2, 1 # new value of + # min(this.length(),anotherString.length())-i + bnez $t2, 1b + addiu $a1, $a1, 2 # point at anotherString.charAt(i++) + subu $v0, $a2, $a3 + +9: + j $ra + nop +END art_quick_string_compareto + + .set pop diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S index b859c708ba..786e86043e 100644 --- a/runtime/arch/mips64/asm_support_mips64.S +++ b/runtime/arch/mips64/asm_support_mips64.S @@ -83,4 +83,38 @@ #endif // USE_HEAP_POISONING .endm +// Based on contents of creg select the minimum integer +// At the end of the macro the original value of creg is lost +.macro MINint dreg,rreg,sreg,creg + .set 
push + .set noat + .ifc \dreg, \rreg + selnez \dreg, \rreg, \creg + seleqz \creg, \sreg, \creg + .else + seleqz \dreg, \sreg, \creg + selnez \creg, \rreg, \creg + .endif + or \dreg, \dreg, \creg + .set pop +.endm + +// Find minimum of two signed registers +.macro MINs dreg,rreg,sreg + .set push + .set noat + slt $at, \rreg, \sreg + MINint \dreg, \rreg, \sreg, $at + .set pop +.endm + +// Find minimum of two unsigned registers +.macro MINu dreg,rreg,sreg + .set push + .set noat + sltu $at, \rreg, \sreg + MINint \dreg, \rreg, \sreg, $at + .set pop +.endm + #endif // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_ diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index d264c9baaf..f1e605ac4a 100644 --- a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -1367,7 +1367,7 @@ END \name // Generate the allocation entrypoints for each allocator. GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) + // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). ENTRY art_quick_alloc_object_rosalloc @@ -1467,6 +1467,9 @@ ENTRY art_quick_alloc_object_rosalloc END art_quick_alloc_object_rosalloc +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) + /* * Entry from managed code to resolve a string, this stub will allocate a String and deliver an * exception on error. On success the String is returned. A0 holds the string index. 
The fast @@ -1725,10 +1728,8 @@ ENTRY_NO_GP art_quick_string_compareto lw $a2,MIRROR_STRING_COUNT_OFFSET($a0) # this.length() lw $a3,MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length() - sltu $at,$a2,$a3 - seleqz $t2,$a3,$at - selnez $at,$a2,$at - or $t2,$t2,$at # $t2 now holds min(this.length(),anotherString.length()) + MINu $t2, $a2, $a3 +# $t2 now holds min(this.length(),anotherString.length()) beqz $t2,9f # while min(this.length(),anotherString.length())-i != 0 subu $v0,$a2,$a3 # if $t2==0 return @@ -1753,16 +1754,18 @@ END art_quick_string_compareto /* java.lang.String.indexOf(int ch, int fromIndex=0) */ ENTRY_NO_GP art_quick_indexof /* $a0 holds address of "this" */ -/* $a1 holds address of "ch" */ -/* $a2 holds address of "fromIndex" */ +/* $a1 holds "ch" */ +/* $a2 holds "fromIndex" */ lw $t0,MIRROR_STRING_COUNT_OFFSET($a0) # this.length() - subu $t0,$t0,$a2 # this.length() - offset - blez $t0,6f # if this.length()-offset <= 0 + slt $at, $a2, $zero # if fromIndex < 0 + seleqz $a2, $a2, $at # fromIndex = 0; + subu $t0,$t0,$a2 # this.length() - fromIndex + blez $t0,6f # if this.length()-fromIndex <= 0 li $v0,-1 # return -1; sll $v0,$a2,1 # $a0 += $a2 * 2 daddu $a0,$a0,$v0 # " " " " " - move $v0,$a2 # Set i to offset. + move $v0,$a2 # Set i to fromIndex. 
1: lhu $t3,MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S index fbacdbc930..290769b365 100644 --- a/runtime/arch/quick_alloc_entrypoints.S +++ b/runtime/arch/quick_alloc_entrypoints.S @@ -219,7 +219,8 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionI GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented) -GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) +// This is to be separately defined for each architecture to allow a hand-written assembly fast path. +// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB) diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index d5807e27b5..4236c287de 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -1205,7 +1205,7 @@ TEST_F(StubTest, AllocObjectArray) { TEST_F(StubTest, StringCompareTo) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \ - (defined(__mips__) && defined(__LP64__)) || (defined(__x86_64__) && !defined(__APPLE__)) + defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)) // TODO: Check the "Unresolved" allocation stubs Thread* self = Thread::Current(); @@ -2054,7 +2054,7 @@ TEST_F(StubTest, IMT) { } TEST_F(StubTest, StringIndexOf) { -#if defined(__arm__) || defined(__aarch64__) || (defined(__mips__) && defined(__LP64__)) +#if defined(__arm__) || defined(__aarch64__) || defined(__mips__) Thread* self = Thread::Current(); ScopedObjectAccess soa(self); // garbage is created during ClassLinker::Init diff --git 
a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S index 77b8e87c99..3e47209afb 100644 --- a/runtime/arch/x86/asm_support_x86.S +++ b/runtime/arch/x86/asm_support_x86.S @@ -142,6 +142,10 @@ MACRO1(POP, reg) CFI_RESTORE(REG_VAR(reg)) END_MACRO +MACRO1(CFI_RESTORE_REG, reg) + CFI_RESTORE(REG_VAR(reg)) +END_MACRO + #define UNREACHABLE int3 MACRO1(UNIMPLEMENTED,name) diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index fbee5d7724..125570d5bb 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -686,7 +686,15 @@ END_MACRO MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro) DEFINE_FUNCTION VAR(c_name) + subl MACRO_LITERAL(12), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(12) + PUSH ebx // Save ebx as the expansion of the + // SETUP_REFS_ONLY_CALLEE_SAVE_FRAME + // macro below clobbers it. SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC + movl 28(%esp), %ebx // restore ebx + CFI_RESTORE_REG ebx + // Outgoing argument set up subl MACRO_LITERAL(12), %esp // alignment padding CFI_ADJUST_CFA_OFFSET(12) @@ -700,6 +708,8 @@ MACRO3(FOUR_ARG_DOWNCALL, c_name, cxx_name, return_macro) addl MACRO_LITERAL(32), %esp // pop arguments CFI_ADJUST_CFA_OFFSET(-32) RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + addl MACRO_LITERAL(16), %esp // pop ebx + padding + CFI_ADJUST_CFA_OFFSET(-16) CALL_MACRO(return_macro) // return or deliver exception END_FUNCTION VAR(c_name) END_MACRO @@ -887,8 +897,8 @@ DEFINE_FUNCTION art_quick_alloc_object_rosalloc RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception END_FUNCTION art_quick_alloc_object_rosalloc - GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER 
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 69caec88f0..dee8d3c6f3 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -809,6 +809,7 @@ END_MACRO // Generate the allocation entrypoints for each allocator. GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR + // A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc). DEFINE_FUNCTION art_quick_alloc_object_rosalloc // Fast path rosalloc allocation. @@ -943,6 +944,8 @@ DEFINE_FUNCTION art_quick_alloc_object_tlab RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception END_FUNCTION art_quick_alloc_object_tlab +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB) + ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER diff --git a/runtime/art_method.cc b/runtime/art_method.cc index a60f31e52e..f97ad51568 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -377,7 +377,7 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) { Runtime* runtime = Runtime::Current(); const void* existing_entry_point = GetEntryPointFromQuickCompiledCode(); - DCHECK(existing_entry_point != nullptr); + CHECK(existing_entry_point != nullptr) << PrettyMethod(this) << "@" << this; ClassLinker* class_linker = runtime->GetClassLinker(); if (class_linker->IsQuickGenericJniStub(existing_entry_point)) { diff --git a/runtime/asm_support.h b/runtime/asm_support.h index 879364e6d2..d5f0dffc38 100644 --- 
a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -101,6 +101,11 @@ ADD_TEST_EQ(THREAD_FLAGS_OFFSET, ADD_TEST_EQ(THREAD_ID_OFFSET, art::Thread::ThinLockIdOffset<__SIZEOF_POINTER__>().Int32Value()) +// Offset of field Thread::tls32_.is_gc_marking. +#define THREAD_IS_GC_MARKING_OFFSET 52 +ADD_TEST_EQ(THREAD_IS_GC_MARKING_OFFSET, + art::Thread::IsGcMarkingOffset<__SIZEOF_POINTER__>().Int32Value()) + // Offset of field Thread::tlsPtr_.card_table. #define THREAD_CARD_TABLE_OFFSET 128 ADD_TEST_EQ(THREAD_CARD_TABLE_OFFSET, diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc index 44af3f75b9..f871543862 100644 --- a/runtime/base/arena_allocator.cc +++ b/runtime/base/arena_allocator.cc @@ -23,6 +23,7 @@ #include "mem_map.h" #include "mutex.h" #include "thread-inl.h" +#include "systrace.h" namespace art { @@ -261,6 +262,7 @@ Arena* ArenaPool::AllocArena(size_t size) { void ArenaPool::TrimMaps() { if (!use_malloc_) { + ScopedTrace trace(__PRETTY_FUNCTION__); // Doesn't work for malloc. MutexLock lock(Thread::Current(), lock_); for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) { diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index 6972b3ef3f..620bf9c8b7 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -19,12 +19,10 @@ #include <errno.h> #include <sys/time.h> -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include "cutils/trace.h" - #include "atomic.h" #include "base/logging.h" #include "base/time_utils.h" +#include "base/systrace.h" #include "base/value_object.h" #include "mutex-inl.h" #include "runtime.h" diff --git a/runtime/base/systrace.h b/runtime/base/systrace.h new file mode 100644 index 0000000000..3901f96b45 --- /dev/null +++ b/runtime/base/systrace.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_BASE_SYSTRACE_H_ +#define ART_RUNTIME_BASE_SYSTRACE_H_ + +#define ATRACE_TAG ATRACE_TAG_DALVIK +#include <cutils/trace.h> +#include <string> +#include <utils/Trace.h> + +namespace art { + +class ScopedTrace { + public: + explicit ScopedTrace(const char* name) { + ATRACE_BEGIN(name); + } + + explicit ScopedTrace(const std::string& name) : ScopedTrace(name.c_str()) {} + + ~ScopedTrace() { + ATRACE_END(); + } +}; + +} // namespace art + +#endif // ART_RUNTIME_BASE_SYSTRACE_H_ diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc index 1942e1dc1b..9a0e0d02a6 100644 --- a/runtime/base/timing_logger.cc +++ b/runtime/base/timing_logger.cc @@ -15,15 +15,14 @@ */ -#define ATRACE_TAG ATRACE_TAG_DALVIK #include <stdio.h> -#include <cutils/trace.h> #include "timing_logger.h" #include "base/logging.h" #include "base/stl_util.h" #include "base/histogram-inl.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "thread-inl.h" diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc index 4672948f31..e4097dd3de 100644 --- a/runtime/base/unix_file/fd_file.cc +++ b/runtime/base/unix_file/fd_file.cc @@ -234,21 +234,34 @@ bool FdFile::PreadFully(void* buffer, size_t byte_count, size_t offset) { return ReadFullyGeneric<pread>(fd_, buffer, byte_count, offset); } -bool FdFile::WriteFully(const void* buffer, size_t byte_count) { +template <bool kUseOffset> +bool FdFile::WriteFullyGeneric(const void* buffer, size_t byte_count, size_t offset) { DCHECK(!read_only_mode_); - const char* 
ptr = static_cast<const char*>(buffer); moveTo(GuardState::kBase, GuardState::kClosed, "Writing into closed file."); + DCHECK(kUseOffset || offset == 0u); + const char* ptr = static_cast<const char*>(buffer); while (byte_count > 0) { - ssize_t bytes_written = TEMP_FAILURE_RETRY(write(fd_, ptr, byte_count)); + ssize_t bytes_written = kUseOffset + ? TEMP_FAILURE_RETRY(pwrite(fd_, ptr, byte_count, offset)) + : TEMP_FAILURE_RETRY(write(fd_, ptr, byte_count)); if (bytes_written == -1) { return false; } byte_count -= bytes_written; // Reduce the number of remaining bytes. ptr += bytes_written; // Move the buffer forward. + offset += static_cast<size_t>(bytes_written); } return true; } +bool FdFile::PwriteFully(const void* buffer, size_t byte_count, size_t offset) { + return WriteFullyGeneric<true>(buffer, byte_count, offset); +} + +bool FdFile::WriteFully(const void* buffer, size_t byte_count) { + return WriteFullyGeneric<false>(buffer, byte_count, 0u); +} + bool FdFile::Copy(FdFile* input_file, int64_t offset, int64_t size) { DCHECK(!read_only_mode_); off_t off = static_cast<off_t>(offset); diff --git a/runtime/base/unix_file/fd_file.h b/runtime/base/unix_file/fd_file.h index 8040afe9b7..16cd44f4ef 100644 --- a/runtime/base/unix_file/fd_file.h +++ b/runtime/base/unix_file/fd_file.h @@ -79,6 +79,7 @@ class FdFile : public RandomAccessFile { bool ReadFully(void* buffer, size_t byte_count) WARN_UNUSED; bool PreadFully(void* buffer, size_t byte_count, size_t offset) WARN_UNUSED; bool WriteFully(const void* buffer, size_t byte_count) WARN_UNUSED; + bool PwriteFully(const void* buffer, size_t byte_count, size_t offset) WARN_UNUSED; // Copy data from another file. 
bool Copy(FdFile* input_file, int64_t offset, int64_t size); @@ -119,6 +120,9 @@ class FdFile : public RandomAccessFile { GuardState guard_state_; private: + template <bool kUseOffset> + bool WriteFullyGeneric(const void* buffer, size_t byte_count, size_t offset); + int fd_; std::string file_path_; bool auto_close_; diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc index ecf607c892..9bc87e5bb9 100644 --- a/runtime/base/unix_file/fd_file_test.cc +++ b/runtime/base/unix_file/fd_file_test.cc @@ -110,6 +110,34 @@ TEST_F(FdFileTest, ReadFullyWithOffset) { ASSERT_EQ(file.Close(), 0); } +TEST_F(FdFileTest, ReadWriteFullyWithOffset) { + // New scratch file, zero-length. + art::ScratchFile tmp; + FdFile file; + ASSERT_TRUE(file.Open(tmp.GetFilename(), O_RDWR)); + EXPECT_GE(file.Fd(), 0); + EXPECT_TRUE(file.IsOpened()); + + const char* test_string = "This is a test string"; + size_t length = strlen(test_string) + 1; + const size_t offset = 12; + std::unique_ptr<char[]> offset_read_string(new char[length]); + std::unique_ptr<char[]> read_string(new char[length]); + + // Write scratch data to file that we can read back into. + EXPECT_TRUE(file.PwriteFully(test_string, length, offset)); + ASSERT_EQ(file.Flush(), 0); + + // Test reading both the offsets. 
+ EXPECT_TRUE(file.PreadFully(&offset_read_string[0], length, offset)); + EXPECT_STREQ(test_string, &offset_read_string[0]); + + EXPECT_TRUE(file.PreadFully(&read_string[0], length, 0u)); + EXPECT_NE(memcmp(&read_string[0], test_string, length), 0); + + ASSERT_EQ(file.Close(), 0); +} + TEST_F(FdFileTest, Copy) { art::ScratchFile src_tmp; FdFile src; diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index b5e6532b6e..d51a1f7ecc 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -36,6 +36,7 @@ #include "base/scoped_arena_containers.h" #include "base/scoped_flock.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "base/unix_file/fd_file.h" #include "base/value_object.h" @@ -1198,7 +1199,7 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( gc::space::ImageSpace* space, Handle<mirror::ClassLoader> class_loader, Handle<mirror::ObjectArray<mirror::DexCache>> dex_caches, - bool added_class_table, + ClassTable::ClassSet* new_class_set, bool* out_forward_dex_cache_array, std::string* out_error_msg) { DCHECK(out_forward_dex_cache_array != nullptr); @@ -1206,207 +1207,228 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( Thread* const self = Thread::Current(); gc::Heap* const heap = Runtime::Current()->GetHeap(); const ImageHeader& header = space->GetImageHeader(); - // Add image classes into the class table for the class loader, and fixup the dex caches and - // class loader fields. - WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); - ClassTable* table = InsertClassTableForClassLoader(class_loader.Get()); - // Dex cache array fixup is all or nothing, we must reject app images that have mixed since we - // rely on clobering the dex cache arrays in the image to forward to bss. 
- size_t num_dex_caches_with_bss_arrays = 0; - const size_t num_dex_caches = dex_caches->GetLength(); - for (size_t i = 0; i < num_dex_caches; i++) { - mirror::DexCache* const dex_cache = dex_caches->Get(i); - const DexFile* const dex_file = dex_cache->GetDexFile(); - // If the oat file expects the dex cache arrays to be in the BSS, then allocate there and - // copy over the arrays. - DCHECK(dex_file != nullptr); - const size_t num_strings = dex_file->NumStringIds(); - const size_t num_types = dex_file->NumTypeIds(); - const size_t num_methods = dex_file->NumMethodIds(); - const size_t num_fields = dex_file->NumFieldIds(); - CHECK_EQ(num_strings, dex_cache->NumStrings()); - CHECK_EQ(num_types, dex_cache->NumResolvedTypes()); - CHECK_EQ(num_methods, dex_cache->NumResolvedMethods()); - CHECK_EQ(num_fields, dex_cache->NumResolvedFields()); - const OatFile::OatDexFile* oat_dex_file = dex_file->GetOatDexFile(); - if (oat_dex_file != nullptr && oat_dex_file->GetDexCacheArrays() != nullptr) { - ++num_dex_caches_with_bss_arrays; - DexCacheArraysLayout layout(image_pointer_size_, dex_file); - uint8_t* const raw_arrays = oat_dex_file->GetDexCacheArrays(); - // The space is not yet visible to the GC, we can avoid the read barriers and use std::copy_n. - if (num_strings != 0u) { - GcRoot<mirror::String>* const image_resolved_strings = dex_cache->GetStrings(); - GcRoot<mirror::String>* const strings = - reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset()); - for (size_t j = 0; kIsDebugBuild && j < num_strings; ++j) { - DCHECK(strings[j].IsNull()); + { + // Add image classes into the class table for the class loader, and fixup the dex caches and + // class loader fields. 
+ WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); + ClassTable* table = InsertClassTableForClassLoader(class_loader.Get()); + // Dex cache array fixup is all or nothing, we must reject app images that have mixed since we + // rely on clobering the dex cache arrays in the image to forward to bss. + size_t num_dex_caches_with_bss_arrays = 0; + const size_t num_dex_caches = dex_caches->GetLength(); + for (size_t i = 0; i < num_dex_caches; i++) { + mirror::DexCache* const dex_cache = dex_caches->Get(i); + const DexFile* const dex_file = dex_cache->GetDexFile(); + const OatFile::OatDexFile* oat_dex_file = dex_file->GetOatDexFile(); + if (oat_dex_file != nullptr && oat_dex_file->GetDexCacheArrays() != nullptr) { + ++num_dex_caches_with_bss_arrays; + } + } + *out_forward_dex_cache_array = num_dex_caches_with_bss_arrays != 0; + if (*out_forward_dex_cache_array) { + if (num_dex_caches_with_bss_arrays != num_dex_caches) { + // Reject application image since we cannot forward only some of the dex cache arrays. + // TODO: We could get around this by having a dedicated forwarding slot. It should be an + // uncommon case. + *out_error_msg = StringPrintf("Dex caches in bss does not match total: %zu vs %zu", + num_dex_caches_with_bss_arrays, + num_dex_caches); + return false; + } + } + // Only add the classes to the class loader after the points where we can return false. + for (size_t i = 0; i < num_dex_caches; i++) { + mirror::DexCache* const dex_cache = dex_caches->Get(i); + const DexFile* const dex_file = dex_cache->GetDexFile(); + const OatFile::OatDexFile* oat_dex_file = dex_file->GetOatDexFile(); + if (oat_dex_file != nullptr && oat_dex_file->GetDexCacheArrays() != nullptr) { + // If the oat file expects the dex cache arrays to be in the BSS, then allocate there and + // copy over the arrays. 
+ DCHECK(dex_file != nullptr); + const size_t num_strings = dex_file->NumStringIds(); + const size_t num_types = dex_file->NumTypeIds(); + const size_t num_methods = dex_file->NumMethodIds(); + const size_t num_fields = dex_file->NumFieldIds(); + CHECK_EQ(num_strings, dex_cache->NumStrings()); + CHECK_EQ(num_types, dex_cache->NumResolvedTypes()); + CHECK_EQ(num_methods, dex_cache->NumResolvedMethods()); + CHECK_EQ(num_fields, dex_cache->NumResolvedFields()); + DexCacheArraysLayout layout(image_pointer_size_, dex_file); + uint8_t* const raw_arrays = oat_dex_file->GetDexCacheArrays(); + // The space is not yet visible to the GC, we can avoid the read barriers and use + // std::copy_n. + if (num_strings != 0u) { + GcRoot<mirror::String>* const image_resolved_strings = dex_cache->GetStrings(); + GcRoot<mirror::String>* const strings = + reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset()); + for (size_t j = 0; kIsDebugBuild && j < num_strings; ++j) { + DCHECK(strings[j].IsNull()); + } + std::copy_n(image_resolved_strings, num_strings, strings); + dex_cache->SetStrings(strings); } - std::copy_n(image_resolved_strings, num_strings, strings); - dex_cache->SetStrings(strings); - } - if (num_types != 0u) { - GcRoot<mirror::Class>* const image_resolved_types = dex_cache->GetResolvedTypes(); - GcRoot<mirror::Class>* const types = - reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset()); - for (size_t j = 0; kIsDebugBuild && j < num_types; ++j) { - DCHECK(types[j].IsNull()); + if (num_types != 0u) { + GcRoot<mirror::Class>* const image_resolved_types = dex_cache->GetResolvedTypes(); + GcRoot<mirror::Class>* const types = + reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset()); + for (size_t j = 0; kIsDebugBuild && j < num_types; ++j) { + DCHECK(types[j].IsNull()); + } + std::copy_n(image_resolved_types, num_types, types); + // Store a pointer to the new location for fast ArtMethod patching without requiring 
map. + // This leaves random garbage at the start of the dex cache array, but nobody should ever + // read from it again. + *reinterpret_cast<GcRoot<mirror::Class>**>(image_resolved_types) = types; + dex_cache->SetResolvedTypes(types); } - std::copy_n(image_resolved_types, num_types, types); - // Store a pointer to the new location for fast ArtMethod patching without requiring map. - // This leaves random garbage at the start of the dex cache array, but nobody should ever - // read from it again. - *reinterpret_cast<GcRoot<mirror::Class>**>(image_resolved_types) = types; - dex_cache->SetResolvedTypes(types); - } - if (num_methods != 0u) { - ArtMethod** const methods = reinterpret_cast<ArtMethod**>( - raw_arrays + layout.MethodsOffset()); - ArtMethod** const image_resolved_methods = dex_cache->GetResolvedMethods(); - for (size_t j = 0; kIsDebugBuild && j < num_methods; ++j) { - DCHECK(methods[j] == nullptr); + if (num_methods != 0u) { + ArtMethod** const methods = reinterpret_cast<ArtMethod**>( + raw_arrays + layout.MethodsOffset()); + ArtMethod** const image_resolved_methods = dex_cache->GetResolvedMethods(); + for (size_t j = 0; kIsDebugBuild && j < num_methods; ++j) { + DCHECK(methods[j] == nullptr); + } + std::copy_n(image_resolved_methods, num_methods, methods); + // Store a pointer to the new location for fast ArtMethod patching without requiring map. + *reinterpret_cast<ArtMethod***>(image_resolved_methods) = methods; + dex_cache->SetResolvedMethods(methods); } - std::copy_n(image_resolved_methods, num_methods, methods); - // Store a pointer to the new location for fast ArtMethod patching without requiring map. 
- *reinterpret_cast<ArtMethod***>(image_resolved_methods) = methods; - dex_cache->SetResolvedMethods(methods); - } - if (num_fields != 0u) { - ArtField** const fields = reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset()); - for (size_t j = 0; kIsDebugBuild && j < num_fields; ++j) { - DCHECK(fields[j] == nullptr); + if (num_fields != 0u) { + ArtField** const fields = + reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset()); + for (size_t j = 0; kIsDebugBuild && j < num_fields; ++j) { + DCHECK(fields[j] == nullptr); + } + std::copy_n(dex_cache->GetResolvedFields(), num_fields, fields); + dex_cache->SetResolvedFields(fields); } - std::copy_n(dex_cache->GetResolvedFields(), num_fields, fields); - dex_cache->SetResolvedFields(fields); } - } - { - WriterMutexLock mu2(self, dex_lock_); - // Make sure to do this after we update the arrays since we store the resolved types array - // in DexCacheData in RegisterDexFileLocked. We need the array pointer to be the one in the - // BSS. - mirror::DexCache* existing_dex_cache = FindDexCacheLocked(self, - *dex_file, - /*allow_failure*/true); - CHECK(existing_dex_cache == nullptr); - StackHandleScope<1> hs3(self); - RegisterDexFileLocked(*dex_file, hs3.NewHandle(dex_cache)); - } - GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes(); - if (!added_class_table) { - for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) { - // The image space is not yet added to the heap, avoid read barriers. - mirror::Class* klass = types[j].Read<kWithoutReadBarrier>(); - if (klass != nullptr) { - DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError); - // Update the class loader from the one in the image class loader to the one that loaded - // the app image. - klass->SetClassLoader(class_loader.Get()); - // The resolved type could be from another dex cache, go through the dex cache just in - // case. May be null for array classes. 
- if (klass->GetDexCacheStrings() != nullptr) { - DCHECK(!klass->IsArrayClass()); - klass->SetDexCacheStrings(klass->GetDexCache()->GetStrings()); - } - // If there are multiple dex caches, there may be the same class multiple times - // in different dex caches. Check for this since inserting will add duplicates - // otherwise. - if (num_dex_caches > 1) { - mirror::Class* existing = table->LookupByDescriptor(klass); - if (existing != nullptr) { - DCHECK_EQ(existing, klass) << PrettyClass(klass); + { + WriterMutexLock mu2(self, dex_lock_); + // Make sure to do this after we update the arrays since we store the resolved types array + // in DexCacheData in RegisterDexFileLocked. We need the array pointer to be the one in the + // BSS. + mirror::DexCache* existing_dex_cache = FindDexCacheLocked(self, + *dex_file, + /*allow_failure*/true); + CHECK(existing_dex_cache == nullptr); + StackHandleScope<1> hs3(self); + RegisterDexFileLocked(*dex_file, hs3.NewHandle(dex_cache)); + } + GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes(); + const size_t num_types = dex_cache->NumResolvedTypes(); + if (new_class_set == nullptr) { + for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) { + // The image space is not yet added to the heap, avoid read barriers. + mirror::Class* klass = types[j].Read(); + if (klass != nullptr) { + DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError); + // Update the class loader from the one in the image class loader to the one that loaded + // the app image. + klass->SetClassLoader(class_loader.Get()); + // The resolved type could be from another dex cache, go through the dex cache just in + // case. May be null for array classes. + if (klass->GetDexCacheStrings() != nullptr) { + DCHECK(!klass->IsArrayClass()); + klass->SetDexCacheStrings(klass->GetDexCache()->GetStrings()); + } + // If there are multiple dex caches, there may be the same class multiple times + // in different dex caches. 
Check for this since inserting will add duplicates + // otherwise. + if (num_dex_caches > 1) { + mirror::Class* existing = table->LookupByDescriptor(klass); + if (existing != nullptr) { + DCHECK_EQ(existing, klass) << PrettyClass(klass); + } else { + table->Insert(klass); + } } else { table->Insert(klass); } - } else { - table->Insert(klass); - } - // Double checked VLOG to avoid overhead. - if (VLOG_IS_ON(image)) { - VLOG(image) << PrettyClass(klass) << " " << klass->GetStatus(); - if (!klass->IsArrayClass()) { - VLOG(image) << "From " << klass->GetDexCache()->GetDexFile()->GetBaseLocation(); - } - VLOG(image) << "Direct methods"; - for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) { - VLOG(image) << PrettyMethod(&m); - } - VLOG(image) << "Virtual methods"; - for (ArtMethod& m : klass->GetVirtualMethods(sizeof(void*))) { - VLOG(image) << PrettyMethod(&m); + // Double checked VLOG to avoid overhead. + if (VLOG_IS_ON(image)) { + VLOG(image) << PrettyClass(klass) << " " << klass->GetStatus(); + if (!klass->IsArrayClass()) { + VLOG(image) << "From " << klass->GetDexCache()->GetDexFile()->GetBaseLocation(); + } + VLOG(image) << "Direct methods"; + for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) { + VLOG(image) << PrettyMethod(&m); + } + VLOG(image) << "Virtual methods"; + for (ArtMethod& m : klass->GetVirtualMethods(sizeof(void*))) { + VLOG(image) << PrettyMethod(&m); + } } } } } - } - if (kIsDebugBuild) { - for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) { - // The image space is not yet added to the heap, avoid read barriers. 
- mirror::Class* klass = types[j].Read<kWithoutReadBarrier>(); - if (klass != nullptr) { - DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError); - if (kIsDebugBuild) { - DCHECK_EQ(table->LookupByDescriptor(klass), klass); - mirror::Class* super_class = klass->GetSuperClass(); - if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) { - CHECK_EQ(table->LookupByDescriptor(super_class), super_class); - } - } - if (kIsDebugBuild) { - for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) { - const void* code = m.GetEntryPointFromQuickCompiledCode(); - const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code; - if (!IsQuickResolutionStub(code) && - !IsQuickGenericJniStub(code) && - !IsQuickToInterpreterBridge(code) && - !m.IsNative()) { - DCHECK_EQ(code, oat_code) << PrettyMethod(&m); + if (kIsDebugBuild) { + for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) { + // The image space is not yet added to the heap, avoid read barriers. + mirror::Class* klass = types[j].Read(); + if (klass != nullptr) { + DCHECK_NE(klass->GetStatus(), mirror::Class::kStatusError); + if (kIsDebugBuild) { + if (new_class_set != nullptr) { + auto it = new_class_set->Find(GcRoot<mirror::Class>(klass)); + DCHECK(it != new_class_set->end()); + DCHECK_EQ(it->Read(), klass); + mirror::Class* super_class = klass->GetSuperClass(); + if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) { + auto it2 = new_class_set->Find(GcRoot<mirror::Class>(super_class)); + DCHECK(it2 != new_class_set->end()); + DCHECK_EQ(it2->Read(), super_class); + } + } else { + DCHECK_EQ(table->LookupByDescriptor(klass), klass); + mirror::Class* super_class = klass->GetSuperClass(); + if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) { + CHECK_EQ(table->LookupByDescriptor(super_class), super_class); + } } } - VLOG(image) << "Virtual methods"; - for (ArtMethod& m : klass->GetVirtualMethods(sizeof(void*))) { - const void* code = 
m.GetEntryPointFromQuickCompiledCode(); - const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code; - if (!IsQuickResolutionStub(code) && - !IsQuickGenericJniStub(code) && - !IsQuickToInterpreterBridge(code) && - !m.IsNative()) { - DCHECK_EQ(code, oat_code) << PrettyMethod(&m); + if (kIsDebugBuild) { + for (ArtMethod& m : klass->GetDirectMethods(sizeof(void*))) { + const void* code = m.GetEntryPointFromQuickCompiledCode(); + const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code; + if (!IsQuickResolutionStub(code) && + !IsQuickGenericJniStub(code) && + !IsQuickToInterpreterBridge(code) && + !m.IsNative()) { + DCHECK_EQ(code, oat_code) << PrettyMethod(&m); + } + } + for (ArtMethod& m : klass->GetVirtualMethods(sizeof(void*))) { + const void* code = m.GetEntryPointFromQuickCompiledCode(); + const void* oat_code = m.IsInvokable() ? GetQuickOatCodeFor(&m) : code; + if (!IsQuickResolutionStub(code) && + !IsQuickGenericJniStub(code) && + !IsQuickToInterpreterBridge(code) && + !m.IsNative()) { + DCHECK_EQ(code, oat_code) << PrettyMethod(&m); + } } } } } } } - } - *out_forward_dex_cache_array = num_dex_caches_with_bss_arrays != 0; - if (*out_forward_dex_cache_array) { - if (num_dex_caches_with_bss_arrays != num_dex_caches) { - // Reject application image since we cannot forward only some of the dex cache arrays. - // TODO: We could get around this by having a dedicated forwarding slot. It should be an - // uncommon case. 
- *out_error_msg = StringPrintf("Dex caches in bss does not match total: %zu vs %zu", - num_dex_caches_with_bss_arrays, - num_dex_caches); - return false; + if (*out_forward_dex_cache_array) { + ScopedTrace timing("Fixup ArtMethod dex cache arrays"); + FixupArtMethodArrayVisitor visitor(header); + header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( + &visitor, + space->Begin(), + sizeof(void*)); + Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get()); } - FixupArtMethodArrayVisitor visitor(header); - header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &visitor, - space->Begin(), - sizeof(void*)); - Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get()); - } - if (kIsDebugBuild) { - ClassTable* const class_table = class_loader.Get()->GetClassTable(); - VerifyClassInTableArtMethodVisitor visitor2(class_table); - header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &visitor2, - space->Begin(), - sizeof(void*)); } return true; } +// Update the class loader and resolved string dex cache array of classes. Should only be used on +// classes in the image space. class UpdateClassLoaderAndResolvedStringsVisitor { public: UpdateClassLoaderAndResolvedStringsVisitor(gc::space::ImageSpace* space, @@ -1644,43 +1666,46 @@ bool ClassLinker::AddImageSpace( methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_); } - const ImageSection& class_table_section = header.GetImageSection(ImageHeader::kSectionClassTable); - bool added_class_table = false; - if (app_image) { - GetOrCreateAllocatorForClassLoader(class_loader.Get()); // Make sure we have a linear alloc. 
- } ClassTable* class_table = nullptr; { WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); class_table = InsertClassTableForClassLoader(class_loader.Get()); - if (class_table_section.Size() > 0u) { - const uint64_t start_time2 = NanoTime(); - class_table->ReadFromMemory(space->Begin() + class_table_section.Offset()); - if (!app_image) { - dex_cache_boot_image_class_lookup_required_ = false; - } - VLOG(image) << "Adding class table classes took " << PrettyDuration(NanoTime() - start_time2); - added_class_table = true; + } + // If we have a class table section, read it and use it for verification in + // UpdateAppImageClassLoadersAndDexCaches. + ClassTable::ClassSet temp_set; + const ImageSection& class_table_section = header.GetImageSection(ImageHeader::kSectionClassTable); + const bool added_class_table = class_table_section.Size() > 0u; + if (added_class_table) { + const uint64_t start_time2 = NanoTime(); + size_t read_count = 0; + temp_set = ClassTable::ClassSet(space->Begin() + class_table_section.Offset(), + /*make copy*/false, + &read_count); + if (!app_image) { + dex_cache_boot_image_class_lookup_required_ = false; } + VLOG(image) << "Adding class table classes took " << PrettyDuration(NanoTime() - start_time2); } if (app_image) { bool forward_dex_cache_arrays = false; if (!UpdateAppImageClassLoadersAndDexCaches(space, class_loader, dex_caches, - added_class_table, + added_class_table ? &temp_set : nullptr, /*out*/&forward_dex_cache_arrays, /*out*/error_msg)) { return false; } + // Update class loader and resolved strings. If added_class_table is false, the resolved + // strings were forwarded UpdateAppImageClassLoadersAndDexCaches. + UpdateClassLoaderAndResolvedStringsVisitor visitor(space, + class_loader.Get(), + forward_dex_cache_arrays); if (added_class_table) { - WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); - // Update class loader and resolved strings. 
If added_class_table is false, the resolved - // strings were already updated in UpdateAppImageClassLoadersAndDexCaches. - UpdateClassLoaderAndResolvedStringsVisitor visitor(space, - class_loader.Get(), - forward_dex_cache_arrays); - class_table->Visit(visitor); + for (GcRoot<mirror::Class>& root : temp_set) { + visitor(root.Read()); + } } // forward_dex_cache_arrays is true iff we copied all of the dex cache arrays into the .bss. // In this case, madvise away the dex cache arrays section of the image to reduce RAM usage and @@ -1699,6 +1724,19 @@ bool ClassLinker::AddImageSpace( } } } + if (added_class_table) { + WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); + class_table->AddClassSet(std::move(temp_set)); + } + if (kIsDebugBuild && app_image) { + // This verification needs to happen after the classes have been added to the class loader. + // Since it ensures classes are in the class table. + VerifyClassInTableArtMethodVisitor visitor2(class_table); + header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( + &visitor2, + space->Begin(), + sizeof(void*)); + } VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time); return true; } @@ -2855,8 +2893,9 @@ LinearAlloc* ClassLinker::GetOrCreateAllocatorForClassLoader(mirror::ClassLoader WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); LinearAlloc* allocator = class_loader->GetAllocator(); if (allocator == nullptr) { - allocator = Runtime::Current()->CreateLinearAlloc(); - class_loader->SetAllocator(allocator); + RegisterClassLoader(class_loader); + allocator = class_loader->GetAllocator(); + CHECK(allocator != nullptr); } return allocator; } @@ -4817,24 +4856,31 @@ void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class); } +void ClassLinker::RegisterClassLoader(mirror::ClassLoader* class_loader) { + CHECK(class_loader->GetAllocator() == 
nullptr); + CHECK(class_loader->GetClassTable() == nullptr); + Thread* const self = Thread::Current(); + ClassLoaderData data; + data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader); + // Create and set the class table. + data.class_table = new ClassTable; + class_loader->SetClassTable(data.class_table); + // Create and set the linear allocator. + data.allocator = Runtime::Current()->CreateLinearAlloc(); + class_loader->SetAllocator(data.allocator); + // Add to the list so that we know to free the data later. + class_loaders_.push_back(data); +} + ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) { if (class_loader == nullptr) { return &boot_class_table_; } ClassTable* class_table = class_loader->GetClassTable(); if (class_table == nullptr) { - class_table = new ClassTable; - Thread* const self = Thread::Current(); - ClassLoaderData data; - data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader); - data.class_table = class_table; - // Don't already have a class table, add it to the class loader. - CHECK(class_loader->GetClassTable() == nullptr); - class_loader->SetClassTable(data.class_table); - // Should have been set when we registered the dex file. 
- data.allocator = class_loader->GetAllocator(); - CHECK(data.allocator != nullptr); - class_loaders_.push_back(data); + RegisterClassLoader(class_loader); + class_table = class_loader->GetClassTable(); + DCHECK(class_table != nullptr); } return class_table; } @@ -7631,6 +7677,7 @@ void ClassLinker::CleanupClassLoaders() { } std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) { + ScopedTrace trace(__PRETTY_FUNCTION__); ScopedObjectAccess soa(Thread::Current()); ScopedAssertNoThreadSuspension ants(soa.Self(), __FUNCTION__); std::set<DexCacheResolvedClasses> ret; @@ -7667,7 +7714,10 @@ std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_bo } ++num_resolved; DCHECK(!klass->IsProxyClass()); - DCHECK(klass->IsResolved()); + if (!klass->IsResolved()) { + DCHECK(klass->IsErroneous()); + continue; + } mirror::DexCache* klass_dex_cache = klass->GetDexCache(); if (klass_dex_cache == dex_cache) { const size_t class_def_idx = klass->GetDexClassDefIndex(); @@ -7697,6 +7747,7 @@ std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_bo std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys( const std::set<DexCacheResolvedClasses>& classes) { + ScopedTrace trace(__PRETTY_FUNCTION__); std::unordered_set<std::string> ret; Thread* const self = Thread::Current(); std::unordered_map<std::string, const DexFile*> location_to_dex_file; diff --git a/runtime/class_linker.h b/runtime/class_linker.h index 729617ddab..492a228522 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -576,12 +576,12 @@ class ClassLinker { // Unlike GetOrCreateAllocatorForClassLoader, GetAllocatorForClassLoader asserts that the // allocator for this class loader is already created. 
- static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader) + LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader) SHARED_REQUIRES(Locks::mutator_lock_); // Return the linear alloc for a class loader if it is already allocated, otherwise allocate and // set it. TODO: Consider using a lock other than classlinker_classes_lock_. - static LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader) + LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader) REQUIRES(!Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); @@ -980,9 +980,16 @@ class ClassLinker { mirror::Class* LookupClassFromBootImage(const char* descriptor) SHARED_REQUIRES(Locks::mutator_lock_); + // Register a class loader and create its class table and allocator. Should not be called if + // these are already created. + void RegisterClassLoader(mirror::ClassLoader* class_loader) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(Locks::classlinker_classes_lock_); + // Returns null if not found. ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader) SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_); + // Insert a new class table if not found. ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader) SHARED_REQUIRES(Locks::mutator_lock_) @@ -1022,11 +1029,13 @@ class ClassLinker { SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::classlinker_classes_lock_); + // new_class_set is the set of classes that were read from the class table section in the image. + // If there was no class table section, it is null. 
bool UpdateAppImageClassLoadersAndDexCaches( gc::space::ImageSpace* space, Handle<mirror::ClassLoader> class_loader, Handle<mirror::ObjectArray<mirror::DexCache>> dex_caches, - bool added_class_table, + ClassTable::ClassSet* new_class_set, bool* out_forward_dex_cache_array, std::string* out_error_msg) REQUIRES(!dex_lock_) diff --git a/runtime/class_table.cc b/runtime/class_table.cc index afb0556e1e..d815b1a7a3 100644 --- a/runtime/class_table.cc +++ b/runtime/class_table.cc @@ -168,8 +168,12 @@ size_t ClassTable::WriteToMemory(uint8_t* ptr) const { size_t ClassTable::ReadFromMemory(uint8_t* ptr) { size_t read_count = 0; - classes_.insert(classes_.begin(), ClassSet(ptr, /*make copy*/false, &read_count)); + AddClassSet(ClassSet(ptr, /*make copy*/false, &read_count)); return read_count; } +void ClassTable::AddClassSet(ClassSet&& set) { + classes_.insert(classes_.begin(), std::move(set)); +} + } // namespace art diff --git a/runtime/class_table.h b/runtime/class_table.h index 5f2eb48d55..0e0e860b4f 100644 --- a/runtime/class_table.h +++ b/runtime/class_table.h @@ -39,6 +39,34 @@ namespace mirror { // Each loader has a ClassTable class ClassTable { public: + class ClassDescriptorHashEquals { + public: + // uint32_t for cross compilation. + uint32_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS; + // Same class loader and descriptor. + bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const + NO_THREAD_SAFETY_ANALYSIS;; + // Same descriptor. + bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const + NO_THREAD_SAFETY_ANALYSIS; + // uint32_t for cross compilation. 
+ uint32_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS; + }; + class GcRootEmptyFn { + public: + void MakeEmpty(GcRoot<mirror::Class>& item) const { + item = GcRoot<mirror::Class>(); + } + bool IsEmpty(const GcRoot<mirror::Class>& item) const { + return item.IsNull(); + } + }; + // hash set which hashes class descriptor, and compares descriptors and class loaders. Results + // should be compared for a matching Class descriptor and class loader. + typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals, + ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>> + ClassSet; + ClassTable(); // Used by image writer for checking. @@ -112,35 +140,12 @@ class ClassTable { REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - private: - class ClassDescriptorHashEquals { - public: - // uint32_t for cross compilation. - uint32_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS; - // Same class loader and descriptor. - bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b) const - NO_THREAD_SAFETY_ANALYSIS;; - // Same descriptor. - bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor) const - NO_THREAD_SAFETY_ANALYSIS; - // uint32_t for cross compilation. - uint32_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS; - }; - class GcRootEmptyFn { - public: - void MakeEmpty(GcRoot<mirror::Class>& item) const { - item = GcRoot<mirror::Class>(); - } - bool IsEmpty(const GcRoot<mirror::Class>& item) const { - return item.IsNull(); - } - }; - // hash set which hashes class descriptor, and compares descriptors and class loaders. Results - // should be compared for a matching Class descriptor and class loader. 
- typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals, - ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>> - ClassSet; + // Add a class set to the front of classes. + void AddClassSet(ClassSet&& set) + REQUIRES(Locks::classlinker_classes_lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + private: // TODO: shard lock to have one per class loader. // We have a vector to help prevent dirty pages after the zygote forks by calling FreezeSnapshot. std::vector<ClassSet> classes_ GUARDED_BY(Locks::classlinker_classes_lock_); diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc index 3df9101613..729957f318 100644 --- a/runtime/common_runtime_test.cc +++ b/runtime/common_runtime_test.cc @@ -406,6 +406,7 @@ void CommonRuntimeTestImpl::TearDown() { int rmdir_cache_result = rmdir(dalvik_cache_.c_str()); ASSERT_EQ(0, rmdir_cache_result); TearDownAndroidData(android_data_, true); + dalvik_cache_.clear(); // icu4c has a fixed 10-element array "gCommonICUDataArray". // If we run > 10 tests, we fill that array and u_setCommonData fails. 
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc index 81a3e4b08c..4a0a6fc8ad 100644 --- a/runtime/dex_file.cc +++ b/runtime/dex_file.cc @@ -34,6 +34,7 @@ #include "base/logging.h" #include "base/stl_util.h" #include "base/stringprintf.h" +#include "base/systrace.h" #include "class_linker-inl.h" #include "dex_file-inl.h" #include "dex_file_verifier.h" @@ -116,6 +117,7 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* bool DexFile::Open(const char* filename, const char* location, std::string* error_msg, std::vector<std::unique_ptr<const DexFile>>* dex_files) { + ScopedTrace trace(std::string("Open dex file ") + location); DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr"; uint32_t magic; ScopedFd fd(OpenAndReadMagic(filename, &magic, error_msg)); @@ -201,6 +203,7 @@ std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base, size_t size, const OatDexFile* oat_dex_file, bool verify, std::string* error_msg) { + ScopedTrace trace(std::string("Open dex file from RAM ") + location); std::unique_ptr<const DexFile> dex_file = OpenMemory(base, size, location, @@ -221,6 +224,7 @@ std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base, size_t size, std::unique_ptr<const DexFile> DexFile::OpenFile(int fd, const char* location, bool verify, std::string* error_msg) { + ScopedTrace trace(std::string("Open dex file ") + location); CHECK(location != nullptr); std::unique_ptr<MemMap> map; { @@ -278,6 +282,7 @@ const char* DexFile::kClassesDex = "classes.dex"; bool DexFile::OpenZip(int fd, const std::string& location, std::string* error_msg, std::vector<std::unique_ptr<const DexFile>>* dex_files) { + ScopedTrace trace("Dex file open Zip " + std::string(location)); DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr"; std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg)); if (zip_archive.get() == nullptr) { @@ -303,6 +308,7 @@ std::unique_ptr<const 
DexFile> DexFile::OpenMemory(const std::string& location, std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive, const char* entry_name, const std::string& location, std::string* error_msg, ZipOpenErrorCode* error_code) { + ScopedTrace trace("Dex file open from Zip Archive " + std::string(location)); CHECK(!location.empty()); std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg)); if (zip_entry.get() == nullptr) { @@ -348,6 +354,7 @@ static constexpr size_t kWarnOnManyDexFilesThreshold = 100; bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& location, std::string* error_msg, std::vector<std::unique_ptr<const DexFile>>* dex_files) { + ScopedTrace trace("Dex file open from Zip " + std::string(location)); DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr"; ZipOpenErrorCode error_code; std::unique_ptr<const DexFile> dex_file(Open(zip_archive, kClassesDex, location, error_msg, diff --git a/runtime/elf.h b/runtime/elf.h index d1efc92c30..63b18c5d34 100644 --- a/runtime/elf.h +++ b/runtime/elf.h @@ -1284,6 +1284,7 @@ enum : unsigned { SHT_MIPS_REGINFO = 0x70000006, // Register usage information SHT_MIPS_OPTIONS = 0x7000000d, // General options + SHT_MIPS_ABIFLAGS = 0x7000002a, // Abiflags options SHT_HIPROC = 0x7fffffff, // Highest processor arch-specific type. SHT_LOUSER = 0x80000000, // Lowest type reserved for applications. @@ -1606,7 +1607,8 @@ enum { // MIPS program header types. PT_MIPS_REGINFO = 0x70000000, // Register usage information. PT_MIPS_RTPROC = 0x70000001, // Runtime procedure table. - PT_MIPS_OPTIONS = 0x70000002 // Options segment. + PT_MIPS_OPTIONS = 0x70000002, // Options segment. + PT_MIPS_ABIFLAGS = 0x70000003 // Abiflags segment. }; // Segment flag bits. 
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc index 1a7b1a374e..121da37389 100644 --- a/runtime/gc/accounting/card_table.cc +++ b/runtime/gc/accounting/card_table.cc @@ -17,6 +17,7 @@ #include "card_table.h" #include "base/logging.h" +#include "base/systrace.h" #include "card_table-inl.h" #include "gc/heap.h" #include "gc/space/space.h" @@ -57,6 +58,7 @@ constexpr uint8_t CardTable::kCardDirty; */ CardTable* CardTable::Create(const uint8_t* heap_begin, size_t heap_capacity) { + ScopedTrace trace(__PRETTY_FUNCTION__); /* Set up the card table */ size_t capacity = heap_capacity / kCardSize; /* Allocate an extra 256 bytes to allow fixed low-byte of base */ diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc index 4de5388d8c..4672483308 100644 --- a/runtime/gc/allocation_record.cc +++ b/runtime/gc/allocation_record.cc @@ -245,6 +245,9 @@ void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) { heap->SetAllocTrackingEnabled(true); } } else { + // Delete outside of the critical section to avoid possible lock violations like the runtime + // shutdown lock. + std::unique_ptr<AllocRecordObjectMap> map; { MutexLock mu(self, *Locks::alloc_tracker_lock_); if (!heap->IsAllocTrackingEnabled()) { @@ -252,7 +255,7 @@ void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) { } heap->SetAllocTrackingEnabled(false); LOG(INFO) << "Disabling alloc tracker"; - heap->SetAllocationRecords(nullptr); + map = heap->ReleaseAllocationRecords(); } // If an allocation comes in before we uninstrument, we will safely drop it on the floor. 
Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints(); diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc index afd0a30fe4..18c4adf608 100644 --- a/runtime/gc/collector/garbage_collector.cc +++ b/runtime/gc/collector/garbage_collector.cc @@ -18,13 +18,11 @@ #include "garbage_collector.h" -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include "cutils/trace.h" - #include "base/dumpable.h" #include "base/histogram-inl.h" #include "base/logging.h" #include "base/mutex-inl.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "gc/accounting/heap_bitmap.h" #include "gc/space/large_object_space.h" @@ -81,7 +79,7 @@ void GarbageCollector::ResetCumulativeStatistics() { } void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) { - ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName()).c_str()); + ScopedTrace trace(StringPrintf("%s %s GC", PrettyCause(gc_cause), GetName())); Thread* self = Thread::Current(); uint64_t start_time = NanoTime(); Iteration* current_iteration = GetCurrentIteration(); @@ -107,7 +105,6 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) { MutexLock mu(self, pause_histogram_lock_); pause_histogram_.AdjustAndAddValue(pause_time); } - ATRACE_END(); } void GarbageCollector::SwapBitmaps() { diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc index 7727b2da18..6beb60608c 100644 --- a/runtime/gc/collector/mark_compact.cc +++ b/runtime/gc/collector/mark_compact.cc @@ -131,7 +131,7 @@ void MarkCompact::ProcessReferences(Thread* self) { class BitmapSetSlowPathVisitor { public: - void operator()(const mirror::Object* obj) const { + void operator()(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) { // Marking a large object, make sure its aligned as a sanity check. 
if (!IsAligned<kPageSize>(obj)) { Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR)); diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 64c8e9af04..6073fc8a78 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -22,13 +22,11 @@ #include <climits> #include <vector> -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include "cutils/trace.h" - #include "base/bounded_fifo.h" #include "base/logging.h" #include "base/macros.h" #include "base/mutex-inl.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "base/timing_logger.h" #include "gc/accounting/card_table-inl.h" @@ -1137,17 +1135,15 @@ class CheckpointMarkThreadRoots : public Closure, public RootVisitor { } virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { - ATRACE_BEGIN("Marking thread roots"); + ScopedTrace trace("Marking thread roots"); // Note: self is not necessarily equal to thread since thread may be suspended. Thread* const self = Thread::Current(); CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc) << thread->GetState() << " thread " << thread << " self " << self; thread->VisitRoots(this); - ATRACE_END(); if (revoke_ros_alloc_thread_local_buffers_at_checkpoint_) { - ATRACE_BEGIN("RevokeRosAllocThreadLocalBuffers"); + ScopedTrace trace2("RevokeRosAllocThreadLocalBuffers"); mark_sweep_->GetHeap()->RevokeRosAllocThreadLocalBuffers(thread); - ATRACE_END(); } // If thread is a running mutator, then act on behalf of the garbage collector. // See the code in ThreadList::RunCheckpoint. 
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h index 12cf3dbf98..e87b5ff332 100644 --- a/runtime/gc/collector/semi_space-inl.h +++ b/runtime/gc/collector/semi_space-inl.h @@ -75,6 +75,7 @@ inline void SemiSpace::MarkObject( } obj_ptr->Assign(forward_address); } else if (!collect_from_space_only_ && !immune_spaces_.IsInImmuneRegion(obj)) { + DCHECK(!to_space_->HasAddress(obj)) << "Tried to mark " << obj << " in to-space"; BitmapSetSlowPathVisitor visitor(this); if (!mark_bitmap_->Set(obj, visitor)) { // This object was not previously marked. diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index 278469329f..f37daa54e9 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -619,7 +619,7 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) { mirror::Object* SemiSpace::MarkObject(mirror::Object* root) { auto ref = StackReference<mirror::Object>::FromMirrorPtr(root); - MarkObject(&ref); + MarkObjectIfNotInToSpace(&ref); return ref.AsMirrorPtr(); } diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 1b4cbeccda..2e5b599940 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -16,9 +16,6 @@ #include "heap.h" -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include <cutils/trace.h> - #include <limits> #include <memory> #include <unwind.h> // For GC verification. 
@@ -30,6 +27,7 @@ #include "base/dumpable.h" #include "base/histogram-inl.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "common_throws.h" #include "cutils/sched_policy.h" @@ -61,6 +59,8 @@ #include "heap-inl.h" #include "image.h" #include "intern_table.h" +#include "jit/jit.h" +#include "jit/jit_code_cache.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" @@ -240,6 +240,7 @@ Heap::Heap(size_t initial_size, if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { LOG(INFO) << "Heap() entering"; } + ScopedTrace trace(__FUNCTION__); Runtime* const runtime = Runtime::Current(); // If we aren't the zygote, switch to the default non zygote allocator. This may update the // entrypoints. @@ -274,14 +275,12 @@ Heap::Heap(size_t initial_size, uint8_t* const original_requested_alloc_space_begin = requested_alloc_space_begin; for (size_t index = 0; index < image_file_names.size(); ++index) { std::string& image_name = image_file_names[index]; - ATRACE_BEGIN("ImageSpace::Create"); std::string error_msg; space::ImageSpace* boot_image_space = space::ImageSpace::CreateBootImage( image_name.c_str(), image_instruction_set, index > 0, &error_msg); - ATRACE_END(); if (boot_image_space != nullptr) { AddSpace(boot_image_space); added_image_spaces.push_back(boot_image_space); @@ -373,8 +372,8 @@ Heap::Heap(size_t initial_size, } std::string error_str; std::unique_ptr<MemMap> non_moving_space_mem_map; - ATRACE_BEGIN("Create heap maps"); if (separate_non_moving_space) { + ScopedTrace trace2("Create separate non moving space"); // If we are the zygote, the non moving space becomes the zygote space when we run // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't // rename the mem map later. @@ -391,6 +390,7 @@ Heap::Heap(size_t initial_size, } // Attempt to create 2 mem maps at or after the requested begin. 
if (foreground_collector_type_ != kCollectorTypeCC) { + ScopedTrace trace2("Create main mem map"); if (separate_non_moving_space || !is_zygote) { main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, @@ -409,14 +409,15 @@ Heap::Heap(size_t initial_size, if (support_homogeneous_space_compaction || background_collector_type_ == kCollectorTypeSS || foreground_collector_type_ == kCollectorTypeSS) { + ScopedTrace trace2("Create main mem map 2"); main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(), capacity_, &error_str)); CHECK(main_mem_map_2.get() != nullptr) << error_str; } - ATRACE_END(); - ATRACE_BEGIN("Create spaces"); + // Create the non moving space first so that bitmaps don't take up the address range. if (separate_non_moving_space) { + ScopedTrace trace2("Add non moving space"); // Non moving space is always dlmalloc since we currently don't have support for multiple // active rosalloc spaces. const size_t size = non_moving_space_mem_map->Size(); @@ -504,9 +505,7 @@ Heap::Heap(size_t initial_size, if (main_space_backup_.get() != nullptr) { RemoveSpace(main_space_backup_.get()); } - ATRACE_END(); // Allocate the card table. - ATRACE_BEGIN("Create card table"); // We currently don't support dynamically resizing the card table. // Since we don't know where in the low_4gb the app image will be located, make the card table // cover the whole low_4gb. TODO: Extend the card table in AddSpace. 
@@ -517,7 +516,6 @@ Heap::Heap(size_t initial_size, card_table_.reset(accounting::CardTable::Create(reinterpret_cast<uint8_t*>(kMinHeapAddress), 4 * GB - kMinHeapAddress)); CHECK(card_table_.get() != nullptr) << "Failed to create card table"; - ATRACE_END(); if (foreground_collector_type_ == kCollectorTypeCC && kUseTableLookupReadBarrier) { rb_table_.reset(new accounting::ReadBarrierTable()); DCHECK(rb_table_->IsAllCleared()); @@ -1340,24 +1338,19 @@ void Heap::DoPendingCollectorTransition() { void Heap::Trim(Thread* self) { Runtime* const runtime = Runtime::Current(); if (!CareAboutPauseTimes()) { - ATRACE_BEGIN("Deflating monitors"); // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care // about pauses. - { - ScopedSuspendAll ssa(__FUNCTION__); - uint64_t start_time = NanoTime(); - size_t count = runtime->GetMonitorList()->DeflateMonitors(); - VLOG(heap) << "Deflating " << count << " monitors took " - << PrettyDuration(NanoTime() - start_time); - } - ATRACE_END(); + ScopedTrace trace("Deflating monitors"); + ScopedSuspendAll ssa(__FUNCTION__); + uint64_t start_time = NanoTime(); + size_t count = runtime->GetMonitorList()->DeflateMonitors(); + VLOG(heap) << "Deflating " << count << " monitors took " + << PrettyDuration(NanoTime() - start_time); } TrimIndirectReferenceTables(self); TrimSpaces(self); // Trim arenas that may have been used by JIT or verifier. - ATRACE_BEGIN("Trimming arena maps"); runtime->GetArenaPool()->TrimMaps(); - ATRACE_END(); } class TrimIndirectReferenceTableClosure : public Closure { @@ -1365,9 +1358,7 @@ class TrimIndirectReferenceTableClosure : public Closure { explicit TrimIndirectReferenceTableClosure(Barrier* barrier) : barrier_(barrier) { } virtual void Run(Thread* thread) OVERRIDE NO_THREAD_SAFETY_ANALYSIS { - ATRACE_BEGIN("Trimming reference table"); thread->GetJniEnv()->locals.Trim(); - ATRACE_END(); // If thread is a running mutator, then act on behalf of the trim thread. 
// See the code in ThreadList::RunCheckpoint. barrier_->Pass(Thread::Current()); @@ -1379,7 +1370,7 @@ class TrimIndirectReferenceTableClosure : public Closure { void Heap::TrimIndirectReferenceTables(Thread* self) { ScopedObjectAccess soa(self); - ATRACE_BEGIN(__FUNCTION__); + ScopedTrace trace(__PRETTY_FUNCTION__); JavaVMExt* vm = soa.Vm(); // Trim globals indirect reference table. vm->TrimGlobals(); @@ -1391,7 +1382,6 @@ void Heap::TrimIndirectReferenceTables(Thread* self) { if (barrier_count != 0) { barrier.Increment(self, barrier_count); } - ATRACE_END(); } void Heap::StartGC(Thread* self, GcCause cause, CollectorType collector_type) { @@ -1410,7 +1400,7 @@ void Heap::TrimSpaces(Thread* self) { // trimming. StartGC(self, kGcCauseTrim, kCollectorTypeHeapTrim); } - ATRACE_BEGIN(__FUNCTION__); + ScopedTrace trace(__PRETTY_FUNCTION__); const uint64_t start_ns = NanoTime(); // Trim the managed spaces. uint64_t total_alloc_space_allocated = 0; @@ -1449,7 +1439,6 @@ void Heap::TrimSpaces(Thread* self) { VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns) << ", advised=" << PrettySize(managed_reclaimed) << ") heap. Managed heap utilization of " << static_cast<int>(100 * managed_utilization) << "%."; - ATRACE_END(); } bool Heap::IsValidObjectAddress(const mirror::Object* obj) const { @@ -1558,7 +1547,6 @@ std::string Heap::DumpSpaces() const { } void Heap::DumpSpaces(std::ostream& stream) const { - ScopedObjectAccess soa(Thread::Current()); for (const auto& space : continuous_spaces_) { accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap(); accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap(); @@ -2682,6 +2670,12 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, // permanantly disabled. 
b/17942071 concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); } + + if ((gc_type == collector::kGcTypeFull) && runtime->UseJit()) { + // It's time to clear all inline caches, in case some classes can be unloaded. + runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self); + } + CHECK(collector != nullptr) << "Could not find garbage collector with collector_type=" << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type; @@ -3434,11 +3428,10 @@ collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) { running_collection_is_blocking_ = true; VLOG(gc) << "Waiting for a blocking GC " << cause; } - ATRACE_BEGIN("GC: Wait For Completion"); + ScopedTrace trace("GC: Wait For Completion"); // We must wait, change thread state then sleep on gc_complete_cond_; gc_complete_cond_->Wait(self); last_gc_type = last_gc_type_; - ATRACE_END(); } uint64_t wait_time = NanoTime() - wait_start; total_wait_time_ += wait_time; @@ -3947,6 +3940,10 @@ void Heap::SetAllocationRecords(AllocRecordObjectMap* records) { allocation_records_.reset(records); } +std::unique_ptr<AllocRecordObjectMap> Heap::ReleaseAllocationRecords() { + return std::move(allocation_records_); +} + void Heap::VisitAllocationRecords(RootVisitor* visitor) const { if (IsAllocTrackingEnabled()) { MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 46dce04325..e0a53a0cc8 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -651,8 +651,8 @@ class Heap { } } - std::string DumpSpaces() const WARN_UNUSED; - void DumpSpaces(std::ostream& stream) const; + void DumpSpaces(std::ostream& stream) const SHARED_REQUIRES(Locks::mutator_lock_); + std::string DumpSpaces() const SHARED_REQUIRES(Locks::mutator_lock_); // Dump object should only be used by the signal handler. 
void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; @@ -766,6 +766,10 @@ class Heap { return allocation_records_.get(); } + // Release ownership of the allocation records. + std::unique_ptr<AllocRecordObjectMap> ReleaseAllocationRecords() + REQUIRES(Locks::alloc_tracker_lock_); + void SetAllocationRecords(AllocRecordObjectMap* records) REQUIRES(Locks::alloc_tracker_lock_); diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index 5aaf1045f9..9ecd391e4d 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -27,6 +27,7 @@ #include "base/macros.h" #include "base/stl_util.h" #include "base/scoped_flock.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "base/unix_file/fd_file.h" #include "gc/accounting/space_bitmap-inl.h" @@ -471,6 +472,7 @@ ImageSpace* ImageSpace::CreateBootImage(const char* image_location, const InstructionSet image_isa, bool secondary_image, std::string* error_msg) { + ScopedTrace trace(__FUNCTION__); std::string system_filename; bool has_system = false; std::string cache_filename; @@ -1167,7 +1169,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, CHECK(image_filename != nullptr); CHECK(image_location != nullptr); - TimingLogger logger(__FUNCTION__, true, false); + TimingLogger logger(__PRETTY_FUNCTION__, true, VLOG_IS_ON(image)); VLOG(image) << "ImageSpace::Init entering image_filename=" << image_filename; std::unique_ptr<File> file; @@ -1533,50 +1535,31 @@ void ImageSpace::CreateMultiImageLocations(const std::string& input_image_file_n // images[0] is f/c/d/e.art // ---------------------------------------------- // images[1] is g/h/i/j.art -> /a/b/h/i/j.art - - // Derive pattern. 
- std::vector<std::string> left; - Split(input_image_file_name, '/', &left); - std::vector<std::string> right; - Split(images[0], '/', &right); - - size_t common = 1; - while (common < left.size() && common < right.size()) { - if (left[left.size() - common - 1] != right[right.size() - common - 1]) { - break; - } - common++; - } - - std::vector<std::string> prefix_vector(left.begin(), left.end() - common); - std::string common_prefix = Join(prefix_vector, '/'); - if (!common_prefix.empty() && common_prefix[0] != '/' && input_image_file_name[0] == '/') { - common_prefix = "/" + common_prefix; - } + const std::string& first_image = images[0]; + // Length of common suffix. + size_t common = 0; + while (common < input_image_file_name.size() && + common < first_image.size() && + *(input_image_file_name.end() - common - 1) == *(first_image.end() - common - 1)) { + ++common; + } + // We want to replace the prefix of the input image with the prefix of the boot class path. + // This handles the case where the image file contains @ separators. + // Example image_file_name is oats/system@framework@boot.art + // images[0] is .../arm/boot.art + // means that the image name prefix will be oats/system@framework@ + // so that the other images are openable. + const size_t old_prefix_length = first_image.size() - common; + const std::string new_prefix = input_image_file_name.substr( + 0, + input_image_file_name.size() - common); // Apply pattern to images[1] .. images[n]. for (size_t i = 1; i < images.size(); ++i) { - std::string image = images[i]; - - size_t rslash = std::string::npos; - for (size_t j = 0; j < common; ++j) { - if (rslash != std::string::npos) { - rslash--; - } - - rslash = image.rfind('/', rslash); - if (rslash == std::string::npos) { - rslash = 0; - } - if (rslash == 0) { - break; - } - } - std::string image_part = image.substr(rslash); - - std::string new_image = common_prefix + (StartsWith(image_part, "/") ? 
"" : "/") + - image_part; - image_file_names->push_back(new_image); + const std::string& image = images[i]; + CHECK_GT(image.length(), old_prefix_length); + std::string suffix = image.substr(old_prefix_length); + image_file_names->push_back(new_prefix + suffix); } } diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc index e70fe215ab..010f677885 100644 --- a/runtime/gc/space/large_object_space.cc +++ b/runtime/gc/space/large_object_space.cc @@ -27,6 +27,7 @@ #include "base/stl_util.h" #include "image.h" #include "os.h" +#include "scoped_thread_state_change.h" #include "space-inl.h" #include "thread-inl.h" @@ -190,6 +191,7 @@ size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) { MutexLock mu(self, lock_); auto it = large_objects_.find(ptr); if (UNLIKELY(it == large_objects_.end())) { + ScopedObjectAccess soa(self); Runtime::Current()->GetHeap()->DumpSpaces(LOG(INTERNAL_FATAL)); LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live"; } diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc index fd4d0a1a47..203d3bcfe9 100644 --- a/runtime/gc/space/rosalloc_space.cc +++ b/runtime/gc/space/rosalloc_space.cc @@ -17,9 +17,6 @@ #include "rosalloc_space-inl.h" -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include "cutils/trace.h" - #include "base/time_utils.h" #include "gc/accounting/card_table.h" #include "gc/accounting/space_bitmap-inl.h" diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc index a5b63b4271..8e49492cf4 100644 --- a/runtime/indirect_reference_table.cc +++ b/runtime/indirect_reference_table.cc @@ -16,6 +16,7 @@ #include "indirect_reference_table-inl.h" +#include "base/systrace.h" #include "jni_internal.h" #include "nth_caller_visitor.h" #include "reference_table.h" @@ -261,6 +262,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) { } void IndirectReferenceTable::Trim() { + ScopedTrace 
trace(__PRETTY_FUNCTION__); const size_t top_index = Capacity(); auto* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize); uint8_t* release_end = table_mem_map_->End(); diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index bfb1f9de06..baf4afea18 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -240,20 +240,10 @@ static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs return os; } -#if !defined(__clang__) -#if (defined(__arm__) || defined(__i386__) || defined(__aarch64__) || defined(__x86_64__) || (defined(__mips__) && !defined(__LP64__))) -// TODO: remove when all targets implemented. static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind; -#else -static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind; -#endif -#else + +#if defined(__clang__) // Clang 3.4 fails to build the goto interpreter implementation. -#if (defined(__arm__) || defined(__i386__) || defined(__aarch64__) || defined(__x86_64__) || (defined(__mips__) && !defined(__LP64__))) -static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind; -#else -static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImplKind; -#endif template<bool do_access_check, bool transaction_active> JValue ExecuteGotoImpl(Thread*, const DexFile::CodeItem*, ShadowFrame&, JValue) { LOG(FATAL) << "UNREACHABLE"; @@ -295,9 +285,7 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item, } jit::Jit* jit = Runtime::Current()->GetJit(); - if (UNLIKELY(jit != nullptr && - jit->JitAtFirstUse() && - jit->GetCodeCache()->ContainsMethod(method))) { + if (jit != nullptr && jit->CanInvokeCompiledCode(method)) { JValue result; // Pop the shadow frame before calling into compiled code. 
@@ -327,12 +315,8 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item, while (true) { // Mterp does not support all instrumentation/debugging. if (MterpShouldSwitchInterpreters()) { -#if !defined(__clang__) - return ExecuteGotoImpl<false, false>(self, code_item, shadow_frame, result_register); -#else return ExecuteSwitchImpl<false, false>(self, code_item, shadow_frame, result_register, false); -#endif } bool returned = ExecuteMterpImpl(self, code_item, &shadow_frame, &result_register); if (returned) { diff --git a/runtime/interpreter/mterp/arm/binopLit8.S b/runtime/interpreter/mterp/arm/binopLit8.S index ec0b3c445d..b8f0d925cd 100644 --- a/runtime/interpreter/mterp/arm/binopLit8.S +++ b/runtime/interpreter/mterp/arm/binopLit8.S @@ -13,7 +13,7 @@ * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB diff --git a/runtime/interpreter/mterp/arm/binopWide.S b/runtime/interpreter/mterp/arm/binopWide.S index 1d511ecfb0..4d880015c8 100644 --- a/runtime/interpreter/mterp/arm/binopWide.S +++ b/runtime/interpreter/mterp/arm/binopWide.S @@ -19,9 +19,9 @@ mov rINST, rINST, lsr #8 @ rINST<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA] - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 .if $chkzero diff --git a/runtime/interpreter/mterp/arm/binopWide2addr.S b/runtime/interpreter/mterp/arm/binopWide2addr.S index 81db48bade..bb16335c34 100644 --- a/runtime/interpreter/mterp/arm/binopWide2addr.S +++ b/runtime/interpreter/mterp/arm/binopWide2addr.S 
@@ -16,8 +16,8 @@ /* binop/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 .if $chkzero diff --git a/runtime/interpreter/mterp/arm/entry.S b/runtime/interpreter/mterp/arm/entry.S index 4c5ffc5b4e..981c03659f 100644 --- a/runtime/interpreter/mterp/arm/entry.S +++ b/runtime/interpreter/mterp/arm/entry.S @@ -47,8 +47,8 @@ ExecuteMterpImpl: /* set up "named" registers */ mov rSELF, r0 ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET] - add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to insns[] (i.e. - the dalivk byte code). - add rREFS, rFP, r0, lsl #2 @ point to reference array in shadow frame + add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs. + VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc. 
add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[] add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode diff --git a/runtime/interpreter/mterp/arm/fbinop2addr.S b/runtime/interpreter/mterp/arm/fbinop2addr.S index b052a29a88..53c87a08f3 100644 --- a/runtime/interpreter/mterp/arm/fbinop2addr.S +++ b/runtime/interpreter/mterp/arm/fbinop2addr.S @@ -7,14 +7,12 @@ */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - flds s1, [r3] @ s1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA + flds s1, [r3] @ s1<- vB FETCH_ADVANCE_INST 1 @ advance rPC, load rINST flds s0, [r9] @ s0<- vA - $instr @ s2<- op GET_INST_OPCODE ip @ extract opcode from rINST fsts s2, [r9] @ vAA<- s2 diff --git a/runtime/interpreter/mterp/arm/fbinopWide2addr.S b/runtime/interpreter/mterp/arm/fbinopWide2addr.S index 4e7401dae3..9766e2c0c4 100644 --- a/runtime/interpreter/mterp/arm/fbinopWide2addr.S +++ b/runtime/interpreter/mterp/arm/fbinopWide2addr.S @@ -8,11 +8,10 @@ */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - fldd d1, [r3] @ d1<- vB CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs + fldd d1, [r3] @ d1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA FETCH_ADVANCE_INST 1 @ advance rPC, load rINST fldd d0, [r9] @ d0<- vA diff --git a/runtime/interpreter/mterp/arm/funop.S b/runtime/interpreter/mterp/arm/funop.S index d7a0859cdc..1b8bb8bac6 100644 --- a/runtime/interpreter/mterp/arm/funop.S +++ b/runtime/interpreter/mterp/arm/funop.S @@ -6,11 +6,10 @@ */ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB flds s0, [r3] @ s0<- vB + ubfx r9, rINST, #8, #4 @ r9<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - and r9, r9, #15 @ r9<- A 
$instr @ s1<- op GET_INST_OPCODE ip @ extract opcode from rINST VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA diff --git a/runtime/interpreter/mterp/arm/funopNarrower.S b/runtime/interpreter/mterp/arm/funopNarrower.S index 9daec28556..b9f758ba86 100644 --- a/runtime/interpreter/mterp/arm/funopNarrower.S +++ b/runtime/interpreter/mterp/arm/funopNarrower.S @@ -6,11 +6,10 @@ */ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB fldd d0, [r3] @ d0<- vB + ubfx r9, rINST, #8, #4 @ r9<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - and r9, r9, #15 @ r9<- A $instr @ s0<- op GET_INST_OPCODE ip @ extract opcode from rINST VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA diff --git a/runtime/interpreter/mterp/arm/funopWider.S b/runtime/interpreter/mterp/arm/funopWider.S index 450ba3a157..854cdc9b66 100644 --- a/runtime/interpreter/mterp/arm/funopWider.S +++ b/runtime/interpreter/mterp/arm/funopWider.S @@ -6,11 +6,10 @@ */ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB flds s0, [r3] @ s0<- vB + ubfx r9, rINST, #8, #4 @ r9<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - and r9, r9, #15 @ r9<- A $instr @ d0<- op CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs GET_INST_OPCODE ip @ extract opcode from rINST diff --git a/runtime/interpreter/mterp/arm/op_aget_wide.S b/runtime/interpreter/mterp/arm/op_aget_wide.S index e1430b44f2..853a7a4e79 100644 --- a/runtime/interpreter/mterp/arm/op_aget_wide.S +++ b/runtime/interpreter/mterp/arm/op_aget_wide.S @@ -19,7 +19,7 @@ bcs common_errArrayIndex @ index >= length, bail FETCH_ADVANCE_INST 2 @ advance rPC, load rINST ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC] - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] GET_INST_OPCODE ip @ extract opcode from rINST stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 GOTO_OPCODE ip @ jump to next instruction diff --git 
a/runtime/interpreter/mterp/arm/op_aput_wide.S b/runtime/interpreter/mterp/arm/op_aput_wide.S index 49839d1b24..005750752f 100644 --- a/runtime/interpreter/mterp/arm/op_aput_wide.S +++ b/runtime/interpreter/mterp/arm/op_aput_wide.S @@ -15,7 +15,7 @@ ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width cmp r1, r3 @ compare unsigned index, length - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] bcs common_errArrayIndex @ index >= length, bail FETCH_ADVANCE_INST 2 @ advance rPC, load rINST ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 diff --git a/runtime/interpreter/mterp/arm/op_cmp_long.S b/runtime/interpreter/mterp/arm/op_cmp_long.S index 2b4c0ea5bb..e57b19c5cc 100644 --- a/runtime/interpreter/mterp/arm/op_cmp_long.S +++ b/runtime/interpreter/mterp/arm/op_cmp_long.S @@ -23,8 +23,8 @@ mov r9, rINST, lsr #8 @ r9<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 cmp r1, r3 @ compare (vBB+1, vCC+1) diff --git a/runtime/interpreter/mterp/arm/op_cmpg_double.S b/runtime/interpreter/mterp/arm/op_cmpg_double.S index 4b05c44beb..602a4b1bfd 100644 --- a/runtime/interpreter/mterp/arm/op_cmpg_double.S +++ b/runtime/interpreter/mterp/arm/op_cmpg_double.S @@ -23,7 +23,7 @@ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC fldd d0, [r2] @ d0<- vBB fldd d1, [r3] @ d1<- vCC - fcmped d0, d1 @ compare (vBB, vCC) + vcmpe.f64 d0, d1 @ compare (vBB, vCC) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST mov r0, #1 @ r0<- 1 (default) GET_INST_OPCODE ip @ extract opcode from rINST diff --git a/runtime/interpreter/mterp/arm/op_cmpg_float.S b/runtime/interpreter/mterp/arm/op_cmpg_float.S index d5d2df2ef5..965091f82d 100644 --- 
a/runtime/interpreter/mterp/arm/op_cmpg_float.S +++ b/runtime/interpreter/mterp/arm/op_cmpg_float.S @@ -23,7 +23,7 @@ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC flds s0, [r2] @ s0<- vBB flds s1, [r3] @ s1<- vCC - fcmpes s0, s1 @ compare (vBB, vCC) + vcmpe.f32 s0, s1 @ compare (vBB, vCC) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST mov r0, #1 @ r0<- 1 (default) GET_INST_OPCODE ip @ extract opcode from rINST diff --git a/runtime/interpreter/mterp/arm/op_cmpl_double.S b/runtime/interpreter/mterp/arm/op_cmpl_double.S index 6ee53b301e..8a5e509ee8 100644 --- a/runtime/interpreter/mterp/arm/op_cmpl_double.S +++ b/runtime/interpreter/mterp/arm/op_cmpl_double.S @@ -23,7 +23,7 @@ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC fldd d0, [r2] @ d0<- vBB fldd d1, [r3] @ d1<- vCC - fcmped d0, d1 @ compare (vBB, vCC) + vcmpe.f64 d0, d1 @ compare (vBB, vCC) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST mvn r0, #0 @ r0<- -1 (default) GET_INST_OPCODE ip @ extract opcode from rINST diff --git a/runtime/interpreter/mterp/arm/op_cmpl_float.S b/runtime/interpreter/mterp/arm/op_cmpl_float.S index 64535b68ae..9df0c2c171 100644 --- a/runtime/interpreter/mterp/arm/op_cmpl_float.S +++ b/runtime/interpreter/mterp/arm/op_cmpl_float.S @@ -23,7 +23,7 @@ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC flds s0, [r2] @ s0<- vBB flds s1, [r3] @ s1<- vCC - fcmpes s0, s1 @ compare (vBB, vCC) + vcmpe.f32 s0, s1 @ compare (vBB, vCC) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST mvn r0, #0 @ r0<- -1 (default) GET_INST_OPCODE ip @ extract opcode from rINST diff --git a/runtime/interpreter/mterp/arm/op_const.S b/runtime/interpreter/mterp/arm/op_const.S index de3e3c3c88..39890a085a 100644 --- a/runtime/interpreter/mterp/arm/op_const.S +++ b/runtime/interpreter/mterp/arm/op_const.S @@ -1,7 +1,7 @@ /* const vAA, #+BBBBbbbb */ mov r3, rINST, lsr #8 @ r3<- AA - FETCH r0, 1 @ r0<- bbbb (low - FETCH r1, 2 @ r1<- BBBB (high + FETCH r0, 1 @ r0<- bbbb (low) + FETCH r1, 2 @ r1<- BBBB (high) FETCH_ADVANCE_INST 3 @ advance rPC, load rINST 
orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb GET_INST_OPCODE ip @ extract opcode from rINST diff --git a/runtime/interpreter/mterp/arm/op_const_16.S b/runtime/interpreter/mterp/arm/op_const_16.S index 59c6dac10a..a30cf3a0db 100644 --- a/runtime/interpreter/mterp/arm/op_const_16.S +++ b/runtime/interpreter/mterp/arm/op_const_16.S @@ -1,5 +1,5 @@ /* const/16 vAA, #+BBBB */ - FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended + FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended) mov r3, rINST, lsr #8 @ r3<- AA FETCH_ADVANCE_INST 2 @ advance rPC, load rINST SET_VREG r0, r3 @ vAA<- r0 diff --git a/runtime/interpreter/mterp/arm/op_const_4.S b/runtime/interpreter/mterp/arm/op_const_4.S index c177bb9eb3..c97b0e91f5 100644 --- a/runtime/interpreter/mterp/arm/op_const_4.S +++ b/runtime/interpreter/mterp/arm/op_const_4.S @@ -1,8 +1,7 @@ /* const/4 vA, #+B */ - mov r1, rINST, lsl #16 @ r1<- Bxxx0000 + sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended) ubfx r0, rINST, #8, #4 @ r0<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended) GET_INST_OPCODE ip @ ip<- opcode from rINST SET_VREG r1, r0 @ fp[A]<- r1 GOTO_OPCODE ip @ execute next instruction diff --git a/runtime/interpreter/mterp/arm/op_const_high16.S b/runtime/interpreter/mterp/arm/op_const_high16.S index 460d546f3b..536276d52d 100644 --- a/runtime/interpreter/mterp/arm/op_const_high16.S +++ b/runtime/interpreter/mterp/arm/op_const_high16.S @@ -1,5 +1,5 @@ /* const/high16 vAA, #+BBBB0000 */ - FETCH r0, 1 @ r0<- 0000BBBB (zero-extended + FETCH r0, 1 @ r0<- 0000BBBB (zero-extended) mov r3, rINST, lsr #8 @ r3<- AA mov r0, r0, lsl #16 @ r0<- BBBB0000 FETCH_ADVANCE_INST 2 @ advance rPC, load rINST diff --git a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S index 1a3d0b2542..1255c0768d 100644 --- a/runtime/interpreter/mterp/arm/op_const_string_jumbo.S +++ b/runtime/interpreter/mterp/arm/op_const_string_jumbo.S @@ -1,7 +1,7 @@ /* 
const/string vAA, String@BBBBBBBB */ EXPORT_PC - FETCH r0, 1 @ r0<- bbbb (low - FETCH r2, 2 @ r2<- BBBB (high + FETCH r0, 1 @ r0<- bbbb (low) + FETCH r2, 2 @ r2<- BBBB (high) mov r1, rINST, lsr #8 @ r1<- AA orr r0, r0, r2, lsl #16 @ r1<- BBBBbbbb add r2, rFP, #OFF_FP_SHADOWFRAME diff --git a/runtime/interpreter/mterp/arm/op_const_wide.S b/runtime/interpreter/mterp/arm/op_const_wide.S index 12394b6cbe..8310a4c129 100644 --- a/runtime/interpreter/mterp/arm/op_const_wide.S +++ b/runtime/interpreter/mterp/arm/op_const_wide.S @@ -8,7 +8,7 @@ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word) CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs FETCH_ADVANCE_INST 5 @ advance rPC, load rINST - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] GET_INST_OPCODE ip @ extract opcode from rINST stmia r9, {r0-r1} @ vAA<- r0/r1 GOTO_OPCODE ip @ jump to next instruction diff --git a/runtime/interpreter/mterp/arm/op_const_wide_16.S b/runtime/interpreter/mterp/arm/op_const_wide_16.S index 3811d8641b..28abb512f0 100644 --- a/runtime/interpreter/mterp/arm/op_const_wide_16.S +++ b/runtime/interpreter/mterp/arm/op_const_wide_16.S @@ -1,10 +1,10 @@ /* const-wide/16 vAA, #+BBBB */ - FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended + FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended) mov r3, rINST, lsr #8 @ r3<- AA mov r1, r0, asr #31 @ r1<- ssssssss FETCH_ADVANCE_INST 2 @ advance rPC, load rINST CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs - add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA] GET_INST_OPCODE ip @ extract opcode from rINST stmia r3, {r0-r1} @ vAA<- r0/r1 GOTO_OPCODE ip @ jump to next instruction diff --git a/runtime/interpreter/mterp/arm/op_const_wide_32.S b/runtime/interpreter/mterp/arm/op_const_wide_32.S index 0b6f1cc384..c10bb0461a 100644 --- a/runtime/interpreter/mterp/arm/op_const_wide_32.S +++ b/runtime/interpreter/mterp/arm/op_const_wide_32.S @@ -5,7 +5,7 @@ FETCH_ADVANCE_INST 3 @ advance rPC, load 
rINST orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs - add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA] mov r1, r0, asr #31 @ r1<- ssssssss GET_INST_OPCODE ip @ extract opcode from rINST stmia r3, {r0-r1} @ vAA<- r0/r1 diff --git a/runtime/interpreter/mterp/arm/op_const_wide_high16.S b/runtime/interpreter/mterp/arm/op_const_wide_high16.S index b9796eb561..d7e38ecc20 100644 --- a/runtime/interpreter/mterp/arm/op_const_wide_high16.S +++ b/runtime/interpreter/mterp/arm/op_const_wide_high16.S @@ -5,7 +5,7 @@ mov r1, r1, lsl #16 @ r1<- BBBB0000 FETCH_ADVANCE_INST 2 @ advance rPC, load rINST CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs - add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA] GET_INST_OPCODE ip @ extract opcode from rINST stmia r3, {r0-r1} @ vAA<- r0/r1 GOTO_OPCODE ip @ jump to next instruction diff --git a/runtime/interpreter/mterp/arm/op_double_to_float.S b/runtime/interpreter/mterp/arm/op_double_to_float.S index e327000409..98fdfbc64e 100644 --- a/runtime/interpreter/mterp/arm/op_double_to_float.S +++ b/runtime/interpreter/mterp/arm/op_double_to_float.S @@ -1 +1 @@ -%include "arm/funopNarrower.S" {"instr":"fcvtsd s0, d0"} +%include "arm/funopNarrower.S" {"instr":"vcvt.f32.f64 s0, d0"} diff --git a/runtime/interpreter/mterp/arm/op_float_to_double.S b/runtime/interpreter/mterp/arm/op_float_to_double.S index fb1892b6d0..b1e12bdc7a 100644 --- a/runtime/interpreter/mterp/arm/op_float_to_double.S +++ b/runtime/interpreter/mterp/arm/op_float_to_double.S @@ -1 +1 @@ -%include "arm/funopWider.S" {"instr":"fcvtds d0, s0"} +%include "arm/funopWider.S" {"instr":"vcvt.f64.f32 d0, s0"} diff --git a/runtime/interpreter/mterp/arm/op_float_to_long.S b/runtime/interpreter/mterp/arm/op_float_to_long.S index 24416d33d2..5c8680f133 100644 --- a/runtime/interpreter/mterp/arm/op_float_to_long.S +++ b/runtime/interpreter/mterp/arm/op_float_to_long.S @@ -17,7 +17,7 
@@ f2l_doconv: cmp r0, #0 @ nonzero == yes mvnne r0, #0 @ return maxlong (7fffffff) mvnne r1, #0x80000000 - ldmnefd sp!, {r4, pc} + popne {r4, pc} mov r0, r4 @ recover arg mov r1, #0xdf000000 @ (float)minlong @@ -25,14 +25,14 @@ f2l_doconv: cmp r0, #0 @ nonzero == yes movne r0, #0 @ return minlong (80000000) movne r1, #0x80000000 - ldmnefd sp!, {r4, pc} + popne {r4, pc} mov r0, r4 @ recover arg mov r1, r4 bl __aeabi_fcmpeq @ is arg == self? cmp r0, #0 @ zero == no moveq r1, #0 @ return zero for NaN - ldmeqfd sp!, {r4, pc} + popeq {r4, pc} mov r0, r4 @ recover arg bl __aeabi_f2lz @ convert float to long diff --git a/runtime/interpreter/mterp/arm/op_iget_wide.S b/runtime/interpreter/mterp/arm/op_iget_wide.S index 859ffac038..e287d519ad 100644 --- a/runtime/interpreter/mterp/arm/op_iget_wide.S +++ b/runtime/interpreter/mterp/arm/op_iget_wide.S @@ -16,7 +16,7 @@ cmp r3, #0 bne MterpException @ bail out CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs - add r3, rFP, r2, lsl #2 @ r3<- &fp[A] + VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A] stmia r3, {r0-r1} @ fp[A]<- r0/r1 ADVANCE 2 GET_INST_OPCODE ip @ extract opcode from rINST diff --git a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S index 07f854adf4..5a7177d8f5 100644 --- a/runtime/interpreter/mterp/arm/op_iget_wide_quick.S +++ b/runtime/interpreter/mterp/arm/op_iget_wide_quick.S @@ -7,7 +7,7 @@ beq common_errNullObject @ object was null ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST - add r3, rFP, r2, lsl #2 @ r3<- &fp[A] + VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A] CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs GET_INST_OPCODE ip @ extract opcode from rINST stmia r3, {r0-r1} @ fp[A]<- r0/r1 diff --git a/runtime/interpreter/mterp/arm/op_instance_of.S b/runtime/interpreter/mterp/arm/op_instance_of.S index d76f0b09fe..019929edf9 100644 --- a/runtime/interpreter/mterp/arm/op_instance_of.S +++ 
b/runtime/interpreter/mterp/arm/op_instance_of.S @@ -11,10 +11,9 @@ VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method mov r3, rSELF @ r3<- self - mov r9, rINST, lsr #8 @ r9<- A+ - and r9, r9, #15 @ r9<- A bl MterpInstanceOf @ (index, &obj, method, self) ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET] + ubfx r9, rINST, #8, #4 @ r9<- A PREFETCH_INST 2 cmp r1, #0 @ exception pending? bne MterpException diff --git a/runtime/interpreter/mterp/arm/op_iput_wide.S b/runtime/interpreter/mterp/arm/op_iput_wide.S index 8bbd63efc9..3dda1877b5 100644 --- a/runtime/interpreter/mterp/arm/op_iput_wide.S +++ b/runtime/interpreter/mterp/arm/op_iput_wide.S @@ -5,7 +5,7 @@ mov r1, rINST, lsr #12 @ r1<- B GET_VREG r1, r1 @ r1<- fp[B], the object pointer ubfx r2, rINST, #8, #4 @ r2<- A - add r2, rFP, r2, lsl #2 @ r2<- &fp[A] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A] ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer PREFETCH_INST 2 bl artSet64InstanceFromMterp diff --git a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S index a2fc9e11ed..88e6ea102c 100644 --- a/runtime/interpreter/mterp/arm/op_iput_wide_quick.S +++ b/runtime/interpreter/mterp/arm/op_iput_wide_quick.S @@ -5,7 +5,7 @@ ubfx r0, rINST, #8, #4 @ r0<- A cmp r2, #0 @ check object for null beq common_errNullObject @ object was null - add r0, rFP, r0, lsl #2 @ r0<- &fp[A] + VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A] ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1] FETCH_ADVANCE_INST 2 @ advance rPC, load rINST strd r0, [r2, r3] @ obj.field<- r0/r1 diff --git a/runtime/interpreter/mterp/arm/op_long_to_double.S b/runtime/interpreter/mterp/arm/op_long_to_double.S index 1d48a2acf2..cac12d48d4 100644 --- a/runtime/interpreter/mterp/arm/op_long_to_double.S +++ b/runtime/interpreter/mterp/arm/op_long_to_double.S @@ -8,8 +8,8 @@ */ mov r3, rINST, lsr #12 @ r3<- B ubfx r9, rINST, #8, #4 @ r9<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] - add r9, rFP, r9, lsl #2 @ r9<- 
&fp[A] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] vldr d0, [r3] @ d0<- vAA FETCH_ADVANCE_INST 1 @ advance rPC, load rINST diff --git a/runtime/interpreter/mterp/arm/op_move_result_wide.S b/runtime/interpreter/mterp/arm/op_move_result_wide.S index 1845ccf69f..87929eaeeb 100644 --- a/runtime/interpreter/mterp/arm/op_move_result_wide.S +++ b/runtime/interpreter/mterp/arm/op_move_result_wide.S @@ -1,7 +1,7 @@ /* move-result-wide vAA */ mov rINST, rINST, lsr #8 @ rINST<- AA ldr r3, [rFP, #OFF_FP_RESULT_REGISTER] - add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA] + VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA] ldmia r3, {r0-r1} @ r0/r1<- retval.j CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST diff --git a/runtime/interpreter/mterp/arm/op_move_wide.S b/runtime/interpreter/mterp/arm/op_move_wide.S index f5d156d732..ff353ea5d9 100644 --- a/runtime/interpreter/mterp/arm/op_move_wide.S +++ b/runtime/interpreter/mterp/arm/op_move_wide.S @@ -2,8 +2,8 @@ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] - add r2, rFP, rINST, lsl #2 @ r2<- &fp[A] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A] ldmia r3, {r0-r1} @ r0/r1<- fp[B] CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST diff --git a/runtime/interpreter/mterp/arm/op_move_wide_16.S b/runtime/interpreter/mterp/arm/op_move_wide_16.S index 8a55c4b13b..9812b66e97 100644 --- a/runtime/interpreter/mterp/arm/op_move_wide_16.S +++ b/runtime/interpreter/mterp/arm/op_move_wide_16.S @@ -2,8 +2,8 @@ /* NOTE: regs can overlap, e.g. 
"move v6,v7" or "move v7,v6" */ FETCH r3, 2 @ r3<- BBBB FETCH r2, 1 @ r2<- AAAA - add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] - add lr, rFP, r2, lsl #2 @ r2<- &fp[AAAA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB] + VREG_INDEX_TO_ADDR lr, r2 @ r2<- &fp[AAAA] ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] FETCH_ADVANCE_INST 3 @ advance rPC, load rINST CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs diff --git a/runtime/interpreter/mterp/arm/op_move_wide_from16.S b/runtime/interpreter/mterp/arm/op_move_wide_from16.S index b65259db50..d2cc60ca9d 100644 --- a/runtime/interpreter/mterp/arm/op_move_wide_from16.S +++ b/runtime/interpreter/mterp/arm/op_move_wide_from16.S @@ -2,8 +2,8 @@ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ FETCH r3, 1 @ r3<- BBBB mov rINST, rINST, lsr #8 @ rINST<- AA - add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] - add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB] + VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA] ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs FETCH_ADVANCE_INST 2 @ advance rPC, load rINST diff --git a/runtime/interpreter/mterp/arm/op_mul_long.S b/runtime/interpreter/mterp/arm/op_mul_long.S index 9e83778e2f..8f40f1976b 100644 --- a/runtime/interpreter/mterp/arm/op_mul_long.S +++ b/runtime/interpreter/mterp/arm/op_mul_long.S @@ -20,8 +20,8 @@ FETCH r0, 1 @ r0<- CCBB and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 mul ip, r2, r1 @ ip<- ZxW @@ -29,7 +29,7 @@ mla r2, r0, r3, ip @ r2<- YxX + (ZxW) mov r0, rINST, lsr #8 @ r0<- AA add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) - add r0, rFP, r0, lsl #2 @ r0<- &fp[AA] + VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA] FETCH_ADVANCE_INST 2 @ advance rPC, load 
rINST GET_INST_OPCODE ip @ extract opcode from rINST stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 diff --git a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S index 789dbd3025..7ef24c5142 100644 --- a/runtime/interpreter/mterp/arm/op_mul_long_2addr.S +++ b/runtime/interpreter/mterp/arm/op_mul_long_2addr.S @@ -9,8 +9,8 @@ /* mul-long/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx r9, rINST, #8, #4 @ r9<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 mul ip, r2, r1 @ ip<- ZxW diff --git a/runtime/interpreter/mterp/arm/op_return_wide.S b/runtime/interpreter/mterp/arm/op_return_wide.S index cfab5301e5..ceae878fa4 100644 --- a/runtime/interpreter/mterp/arm/op_return_wide.S +++ b/runtime/interpreter/mterp/arm/op_return_wide.S @@ -9,6 +9,6 @@ ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) blne MterpSuspendCheck @ (self) mov r2, rINST, lsr #8 @ r2<- AA - add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA] ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1 b MterpReturn diff --git a/runtime/interpreter/mterp/arm/op_sget_wide.S b/runtime/interpreter/mterp/arm/op_sget_wide.S index 3a5090866a..4f2f89d6c3 100644 --- a/runtime/interpreter/mterp/arm/op_sget_wide.S +++ b/runtime/interpreter/mterp/arm/op_sget_wide.S @@ -12,7 +12,7 @@ bl artGet64StaticFromCode ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r9, rINST, lsr #8 @ r9<- AA - add lr, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR lr, r9 @ lr<- &fp[AA] cmp r3, #0 @ Fail to resolve? 
bne MterpException @ bail out FETCH_ADVANCE_INST 2 @ advance rPC, load rINST diff --git a/runtime/interpreter/mterp/arm/op_shl_long.S b/runtime/interpreter/mterp/arm/op_shl_long.S index 12ea24883f..82ec6ed09f 100644 --- a/runtime/interpreter/mterp/arm/op_shl_long.S +++ b/runtime/interpreter/mterp/arm/op_shl_long.S @@ -9,12 +9,12 @@ mov r9, rINST, lsr #8 @ r9<- AA and r3, r0, #255 @ r3<- BB mov r0, r0, lsr #8 @ r0<- CC - add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB] GET_VREG r2, r0 @ r2<- vCC ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs and r2, r2, #63 @ r2<- r2 & 0x3f - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] mov r1, r1, asl r2 @ r1<- r1 << r2 rsb r3, r2, #32 @ r3<- 32 - r2 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) diff --git a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S index 4799e77213..f361a7d29c 100644 --- a/runtime/interpreter/mterp/arm/op_shl_long_2addr.S +++ b/runtime/interpreter/mterp/arm/op_shl_long_2addr.S @@ -7,7 +7,7 @@ ubfx r9, rINST, #8, #4 @ r9<- A GET_VREG r2, r3 @ r2<- vB CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs - add r9, rFP, r9, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] and r2, r2, #63 @ r2<- r2 & 0x3f ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 mov r1, r1, asl r2 @ r1<- r1 << r2 diff --git a/runtime/interpreter/mterp/arm/op_shr_long.S b/runtime/interpreter/mterp/arm/op_shr_long.S index 88a13d6072..a0afe5b040 100644 --- a/runtime/interpreter/mterp/arm/op_shr_long.S +++ b/runtime/interpreter/mterp/arm/op_shr_long.S @@ -9,12 +9,12 @@ mov r9, rINST, lsr #8 @ r9<- AA and r3, r0, #255 @ r3<- BB mov r0, r0, lsr #8 @ r0<- CC - add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB] GET_VREG r2, r0 @ r2<- vCC ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs and r2, 
r2, #63 @ r0<- r0 & 0x3f - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] mov r0, r0, lsr r2 @ r0<- r2 >> r2 rsb r3, r2, #32 @ r3<- 32 - r2 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) diff --git a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S index 78d8bb7dba..976110efd4 100644 --- a/runtime/interpreter/mterp/arm/op_shr_long_2addr.S +++ b/runtime/interpreter/mterp/arm/op_shr_long_2addr.S @@ -7,7 +7,7 @@ ubfx r9, rINST, #8, #4 @ r9<- A GET_VREG r2, r3 @ r2<- vB CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs - add r9, rFP, r9, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] and r2, r2, #63 @ r2<- r2 & 0x3f ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 mov r0, r0, lsr r2 @ r0<- r2 >> r2 diff --git a/runtime/interpreter/mterp/arm/op_sput_wide.S b/runtime/interpreter/mterp/arm/op_sput_wide.S index adbcffa5a9..8d8ed8c4a2 100644 --- a/runtime/interpreter/mterp/arm/op_sput_wide.S +++ b/runtime/interpreter/mterp/arm/op_sput_wide.S @@ -8,7 +8,7 @@ FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rINST, lsr #8 @ r3<- AA - add r2, rFP, r2, lsl #2 + VREG_INDEX_TO_ADDR r2, r2 mov r3, rSELF PREFETCH_INST 2 @ Get next inst, but don't advance rPC bl artSet64IndirectStaticFromMterp diff --git a/runtime/interpreter/mterp/arm/op_ushr_long.S b/runtime/interpreter/mterp/arm/op_ushr_long.S index f98ec639fa..c817bc9fb9 100644 --- a/runtime/interpreter/mterp/arm/op_ushr_long.S +++ b/runtime/interpreter/mterp/arm/op_ushr_long.S @@ -9,12 +9,12 @@ mov r9, rINST, lsr #8 @ r9<- AA and r3, r0, #255 @ r3<- BB mov r0, r0, lsr #8 @ r0<- CC - add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB] GET_VREG r2, r0 @ r2<- vCC ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs and r2, r2, #63 @ r0<- r0 & 0x3f - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] mov r0, 
r0, lsr r2 @ r0<- r2 >> r2 rsb r3, r2, #32 @ r3<- 32 - r2 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) diff --git a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S index 840283dd58..2735f8733a 100644 --- a/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S +++ b/runtime/interpreter/mterp/arm/op_ushr_long_2addr.S @@ -7,7 +7,7 @@ ubfx r9, rINST, #8, #4 @ r9<- A GET_VREG r2, r3 @ r2<- vB CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs - add r9, rFP, r9, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] and r2, r2, #63 @ r2<- r2 & 0x3f ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 mov r0, r0, lsr r2 @ r0<- r2 >> r2 diff --git a/runtime/interpreter/mterp/arm/unopNarrower.S b/runtime/interpreter/mterp/arm/unopNarrower.S index a5fc02797d..2d0453aeb1 100644 --- a/runtime/interpreter/mterp/arm/unopNarrower.S +++ b/runtime/interpreter/mterp/arm/unopNarrower.S @@ -12,7 +12,7 @@ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B ubfx r9, rINST, #8, #4 @ r9<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 FETCH_ADVANCE_INST 1 @ advance rPC, load rINST $preinstr @ optional op; may set condition codes diff --git a/runtime/interpreter/mterp/arm/unopWide.S b/runtime/interpreter/mterp/arm/unopWide.S index a07423468d..cd5defd616 100644 --- a/runtime/interpreter/mterp/arm/unopWide.S +++ b/runtime/interpreter/mterp/arm/unopWide.S @@ -9,8 +9,8 @@ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r3, {r0-r1} @ r0/r1<- vAA CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST diff --git a/runtime/interpreter/mterp/arm/unopWider.S b/runtime/interpreter/mterp/arm/unopWider.S index 
23b6b9d2f5..9d504899b8 100644 --- a/runtime/interpreter/mterp/arm/unopWider.S +++ b/runtime/interpreter/mterp/arm/unopWider.S @@ -10,7 +10,7 @@ mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A GET_VREG r0, r3 @ r0<- vB - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] $preinstr @ optional op; may set condition codes CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST diff --git a/runtime/interpreter/mterp/arm64/entry.S b/runtime/interpreter/mterp/arm64/entry.S index f9073ab5d9..23e656e826 100644 --- a/runtime/interpreter/mterp/arm64/entry.S +++ b/runtime/interpreter/mterp/arm64/entry.S @@ -46,7 +46,7 @@ ExecuteMterpImpl: /* set up "named" registers */ mov xSELF, x0 ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET] - add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to insns[] (i.e. - the dalivk byte code). + add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs. add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc. add xPC, x1, #CODEITEM_INSNS_OFFSET // Point to base of insns[] diff --git a/runtime/interpreter/mterp/arm64/header.S b/runtime/interpreter/mterp/arm64/header.S index 722375002b..7101ba972c 100644 --- a/runtime/interpreter/mterp/arm64/header.S +++ b/runtime/interpreter/mterp/arm64/header.S @@ -272,7 +272,7 @@ codes. * Convert a virtual register index into an address. 
*/ .macro VREG_INDEX_TO_ADDR reg, vreg - add \reg, xFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */ + add \reg, xFP, \vreg, lsl #2 /* WARNING: handle shadow frame vreg zero if store */ .endm /* diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide.S b/runtime/interpreter/mterp/arm64/op_iput_wide.S index 4ce95251f6..e1ab1271f5 100644 --- a/runtime/interpreter/mterp/arm64/op_iput_wide.S +++ b/runtime/interpreter/mterp/arm64/op_iput_wide.S @@ -5,7 +5,7 @@ lsr w1, wINST, #12 // w1<- B GET_VREG w1, w1 // w1<- fp[B], the object pointer ubfx w2, wINST, #8, #4 // w2<- A - add x2, xFP, x2, lsl #2 // w2<- &fp[A] + VREG_INDEX_TO_ADDR x2, x2 // w2<- &fp[A] ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer PREFETCH_INST 2 bl artSet64InstanceFromMterp diff --git a/runtime/interpreter/mterp/arm64/op_sput_wide.S b/runtime/interpreter/mterp/arm64/op_sput_wide.S index 1d034ecf2f..a79b1a6172 100644 --- a/runtime/interpreter/mterp/arm64/op_sput_wide.S +++ b/runtime/interpreter/mterp/arm64/op_sput_wide.S @@ -8,7 +8,7 @@ FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] lsr w2, wINST, #8 // w3<- AA - add x2, xFP, w2, lsl #2 + VREG_INDEX_TO_ADDR x2, w2 mov x3, xSELF PREFETCH_INST 2 // Get next inst, but don't advance rPC bl artSet64IndirectStaticFromMterp diff --git a/runtime/interpreter/mterp/config_mips64 b/runtime/interpreter/mterp/config_mips64 index f804ce5566..c40c007eec 100644 --- a/runtime/interpreter/mterp/config_mips64 +++ b/runtime/interpreter/mterp/config_mips64 @@ -36,262 +36,262 @@ op-start mips64 # (override example:) op OP_SUB_FLOAT_2ADDR arm-vfp # (fallback example:) op OP_SUB_FLOAT_2ADDR FALLBACK - op op_nop FALLBACK - op op_move FALLBACK - op op_move_from16 FALLBACK - op op_move_16 FALLBACK - op op_move_wide FALLBACK - op op_move_wide_from16 FALLBACK - op op_move_wide_16 FALLBACK - op op_move_object FALLBACK - op op_move_object_from16 FALLBACK - op op_move_object_16 FALLBACK - op op_move_result FALLBACK - op 
op_move_result_wide FALLBACK - op op_move_result_object FALLBACK - op op_move_exception FALLBACK - op op_return_void FALLBACK - op op_return FALLBACK - op op_return_wide FALLBACK - op op_return_object FALLBACK - op op_const_4 FALLBACK - op op_const_16 FALLBACK - op op_const FALLBACK - op op_const_high16 FALLBACK - op op_const_wide_16 FALLBACK - op op_const_wide_32 FALLBACK - op op_const_wide FALLBACK - op op_const_wide_high16 FALLBACK - op op_const_string FALLBACK - op op_const_string_jumbo FALLBACK - op op_const_class FALLBACK - op op_monitor_enter FALLBACK - op op_monitor_exit FALLBACK - op op_check_cast FALLBACK - op op_instance_of FALLBACK - op op_array_length FALLBACK - op op_new_instance FALLBACK - op op_new_array FALLBACK - op op_filled_new_array FALLBACK - op op_filled_new_array_range FALLBACK - op op_fill_array_data FALLBACK - op op_throw FALLBACK - op op_goto FALLBACK - op op_goto_16 FALLBACK - op op_goto_32 FALLBACK - op op_packed_switch FALLBACK - op op_sparse_switch FALLBACK - op op_cmpl_float FALLBACK - op op_cmpg_float FALLBACK - op op_cmpl_double FALLBACK - op op_cmpg_double FALLBACK - op op_cmp_long FALLBACK - op op_if_eq FALLBACK - op op_if_ne FALLBACK - op op_if_lt FALLBACK - op op_if_ge FALLBACK - op op_if_gt FALLBACK - op op_if_le FALLBACK - op op_if_eqz FALLBACK - op op_if_nez FALLBACK - op op_if_ltz FALLBACK - op op_if_gez FALLBACK - op op_if_gtz FALLBACK - op op_if_lez FALLBACK - op_unused_3e FALLBACK - op_unused_3f FALLBACK - op_unused_40 FALLBACK - op_unused_41 FALLBACK - op_unused_42 FALLBACK - op_unused_43 FALLBACK - op op_aget FALLBACK - op op_aget_wide FALLBACK - op op_aget_object FALLBACK - op op_aget_boolean FALLBACK - op op_aget_byte FALLBACK - op op_aget_char FALLBACK - op op_aget_short FALLBACK - op op_aput FALLBACK - op op_aput_wide FALLBACK - op op_aput_object FALLBACK - op op_aput_boolean FALLBACK - op op_aput_byte FALLBACK - op op_aput_char FALLBACK - op op_aput_short FALLBACK - op op_iget FALLBACK - op op_iget_wide FALLBACK - 
op op_iget_object FALLBACK - op op_iget_boolean FALLBACK - op op_iget_byte FALLBACK - op op_iget_char FALLBACK - op op_iget_short FALLBACK - op op_iput FALLBACK - op op_iput_wide FALLBACK - op op_iput_object FALLBACK - op op_iput_boolean FALLBACK - op op_iput_byte FALLBACK - op op_iput_char FALLBACK - op op_iput_short FALLBACK - op op_sget FALLBACK - op op_sget_wide FALLBACK - op op_sget_object FALLBACK - op op_sget_boolean FALLBACK - op op_sget_byte FALLBACK - op op_sget_char FALLBACK - op op_sget_short FALLBACK - op op_sput FALLBACK - op op_sput_wide FALLBACK - op op_sput_object FALLBACK - op op_sput_boolean FALLBACK - op op_sput_byte FALLBACK - op op_sput_char FALLBACK - op op_sput_short FALLBACK - op op_invoke_virtual FALLBACK - op op_invoke_super FALLBACK - op op_invoke_direct FALLBACK - op op_invoke_static FALLBACK - op op_invoke_interface FALLBACK - op op_return_void_no_barrier FALLBACK - op op_invoke_virtual_range FALLBACK - op op_invoke_super_range FALLBACK - op op_invoke_direct_range FALLBACK - op op_invoke_static_range FALLBACK - op op_invoke_interface_range FALLBACK - op_unused_79 FALLBACK - op_unused_7a FALLBACK - op op_neg_int FALLBACK - op op_not_int FALLBACK - op op_neg_long FALLBACK - op op_not_long FALLBACK - op op_neg_float FALLBACK - op op_neg_double FALLBACK - op op_int_to_long FALLBACK - op op_int_to_float FALLBACK - op op_int_to_double FALLBACK - op op_long_to_int FALLBACK - op op_long_to_float FALLBACK - op op_long_to_double FALLBACK - op op_float_to_int FALLBACK - op op_float_to_long FALLBACK - op op_float_to_double FALLBACK - op op_double_to_int FALLBACK - op op_double_to_long FALLBACK - op op_double_to_float FALLBACK - op op_int_to_byte FALLBACK - op op_int_to_char FALLBACK - op op_int_to_short FALLBACK - op op_add_int FALLBACK - op op_sub_int FALLBACK - op op_mul_int FALLBACK - op op_div_int FALLBACK - op op_rem_int FALLBACK - op op_and_int FALLBACK - op op_or_int FALLBACK - op op_xor_int FALLBACK - op op_shl_int FALLBACK - op op_shr_int 
FALLBACK - op op_ushr_int FALLBACK - op op_add_long FALLBACK - op op_sub_long FALLBACK - op op_mul_long FALLBACK - op op_div_long FALLBACK - op op_rem_long FALLBACK - op op_and_long FALLBACK - op op_or_long FALLBACK - op op_xor_long FALLBACK - op op_shl_long FALLBACK - op op_shr_long FALLBACK - op op_ushr_long FALLBACK - op op_add_float FALLBACK - op op_sub_float FALLBACK - op op_mul_float FALLBACK - op op_div_float FALLBACK - op op_rem_float FALLBACK - op op_add_double FALLBACK - op op_sub_double FALLBACK - op op_mul_double FALLBACK - op op_div_double FALLBACK - op op_rem_double FALLBACK - op op_add_int_2addr FALLBACK - op op_sub_int_2addr FALLBACK - op op_mul_int_2addr FALLBACK - op op_div_int_2addr FALLBACK - op op_rem_int_2addr FALLBACK - op op_and_int_2addr FALLBACK - op op_or_int_2addr FALLBACK - op op_xor_int_2addr FALLBACK - op op_shl_int_2addr FALLBACK - op op_shr_int_2addr FALLBACK - op op_ushr_int_2addr FALLBACK - op op_add_long_2addr FALLBACK - op op_sub_long_2addr FALLBACK - op op_mul_long_2addr FALLBACK - op op_div_long_2addr FALLBACK - op op_rem_long_2addr FALLBACK - op op_and_long_2addr FALLBACK - op op_or_long_2addr FALLBACK - op op_xor_long_2addr FALLBACK - op op_shl_long_2addr FALLBACK - op op_shr_long_2addr FALLBACK - op op_ushr_long_2addr FALLBACK - op op_add_float_2addr FALLBACK - op op_sub_float_2addr FALLBACK - op op_mul_float_2addr FALLBACK - op op_div_float_2addr FALLBACK - op op_rem_float_2addr FALLBACK - op op_add_double_2addr FALLBACK - op op_sub_double_2addr FALLBACK - op op_mul_double_2addr FALLBACK - op op_div_double_2addr FALLBACK - op op_rem_double_2addr FALLBACK - op op_add_int_lit16 FALLBACK - op op_rsub_int FALLBACK - op op_mul_int_lit16 FALLBACK - op op_div_int_lit16 FALLBACK - op op_rem_int_lit16 FALLBACK - op op_and_int_lit16 FALLBACK - op op_or_int_lit16 FALLBACK - op op_xor_int_lit16 FALLBACK - op op_add_int_lit8 FALLBACK - op op_rsub_int_lit8 FALLBACK - op op_mul_int_lit8 FALLBACK - op op_div_int_lit8 FALLBACK - op 
op_rem_int_lit8 FALLBACK - op op_and_int_lit8 FALLBACK - op op_or_int_lit8 FALLBACK - op op_xor_int_lit8 FALLBACK - op op_shl_int_lit8 FALLBACK - op op_shr_int_lit8 FALLBACK - op op_ushr_int_lit8 FALLBACK - op op_iget_quick FALLBACK - op op_iget_wide_quick FALLBACK - op op_iget_object_quick FALLBACK - op op_iput_quick FALLBACK - op op_iput_wide_quick FALLBACK - op op_iput_object_quick FALLBACK - op op_invoke_virtual_quick FALLBACK - op op_invoke_virtual_range_quick FALLBACK - op op_iput_boolean_quick FALLBACK - op op_iput_byte_quick FALLBACK - op op_iput_char_quick FALLBACK - op op_iput_short_quick FALLBACK - op op_iget_boolean_quick FALLBACK - op op_iget_byte_quick FALLBACK - op op_iget_char_quick FALLBACK - op op_iget_short_quick FALLBACK - op_unused_f3 FALLBACK - op_unused_f4 FALLBACK - op_unused_f5 FALLBACK - op_unused_f6 FALLBACK - op_unused_f7 FALLBACK - op_unused_f8 FALLBACK - op_unused_f9 FALLBACK - op_unused_fa FALLBACK - op_unused_fb FALLBACK - op_unused_fc FALLBACK - op_unused_fd FALLBACK - op_unused_fe FALLBACK - op_unused_ff FALLBACK + # op op_nop FALLBACK + # op op_move FALLBACK + # op op_move_from16 FALLBACK + # op op_move_16 FALLBACK + # op op_move_wide FALLBACK + # op op_move_wide_from16 FALLBACK + # op op_move_wide_16 FALLBACK + # op op_move_object FALLBACK + # op op_move_object_from16 FALLBACK + # op op_move_object_16 FALLBACK + # op op_move_result FALLBACK + # op op_move_result_wide FALLBACK + # op op_move_result_object FALLBACK + # op op_move_exception FALLBACK + # op op_return_void FALLBACK + # op op_return FALLBACK + # op op_return_wide FALLBACK + # op op_return_object FALLBACK + # op op_const_4 FALLBACK + # op op_const_16 FALLBACK + # op op_const FALLBACK + # op op_const_high16 FALLBACK + # op op_const_wide_16 FALLBACK + # op op_const_wide_32 FALLBACK + # op op_const_wide FALLBACK + # op op_const_wide_high16 FALLBACK + # op op_const_string FALLBACK + # op op_const_string_jumbo FALLBACK + # op op_const_class FALLBACK + # op op_monitor_enter 
FALLBACK + # op op_monitor_exit FALLBACK + # op op_check_cast FALLBACK + # op op_instance_of FALLBACK + # op op_array_length FALLBACK + # op op_new_instance FALLBACK + # op op_new_array FALLBACK + # op op_filled_new_array FALLBACK + # op op_filled_new_array_range FALLBACK + # op op_fill_array_data FALLBACK + # op op_throw FALLBACK + # op op_goto FALLBACK + # op op_goto_16 FALLBACK + # op op_goto_32 FALLBACK + # op op_packed_switch FALLBACK + # op op_sparse_switch FALLBACK + # op op_cmpl_float FALLBACK + # op op_cmpg_float FALLBACK + # op op_cmpl_double FALLBACK + # op op_cmpg_double FALLBACK + # op op_cmp_long FALLBACK + # op op_if_eq FALLBACK + # op op_if_ne FALLBACK + # op op_if_lt FALLBACK + # op op_if_ge FALLBACK + # op op_if_gt FALLBACK + # op op_if_le FALLBACK + # op op_if_eqz FALLBACK + # op op_if_nez FALLBACK + # op op_if_ltz FALLBACK + # op op_if_gez FALLBACK + # op op_if_gtz FALLBACK + # op op_if_lez FALLBACK + # op op_unused_3e FALLBACK + # op op_unused_3f FALLBACK + # op op_unused_40 FALLBACK + # op op_unused_41 FALLBACK + # op op_unused_42 FALLBACK + # op op_unused_43 FALLBACK + # op op_aget FALLBACK + # op op_aget_wide FALLBACK + # op op_aget_object FALLBACK + # op op_aget_boolean FALLBACK + # op op_aget_byte FALLBACK + # op op_aget_char FALLBACK + # op op_aget_short FALLBACK + # op op_aput FALLBACK + # op op_aput_wide FALLBACK + # op op_aput_object FALLBACK + # op op_aput_boolean FALLBACK + # op op_aput_byte FALLBACK + # op op_aput_char FALLBACK + # op op_aput_short FALLBACK + # op op_iget FALLBACK + # op op_iget_wide FALLBACK + # op op_iget_object FALLBACK + # op op_iget_boolean FALLBACK + # op op_iget_byte FALLBACK + # op op_iget_char FALLBACK + # op op_iget_short FALLBACK + # op op_iput FALLBACK + # op op_iput_wide FALLBACK + # op op_iput_object FALLBACK + # op op_iput_boolean FALLBACK + # op op_iput_byte FALLBACK + # op op_iput_char FALLBACK + # op op_iput_short FALLBACK + # op op_sget FALLBACK + # op op_sget_wide FALLBACK + # op op_sget_object 
FALLBACK + # op op_sget_boolean FALLBACK + # op op_sget_byte FALLBACK + # op op_sget_char FALLBACK + # op op_sget_short FALLBACK + # op op_sput FALLBACK + # op op_sput_wide FALLBACK + # op op_sput_object FALLBACK + # op op_sput_boolean FALLBACK + # op op_sput_byte FALLBACK + # op op_sput_char FALLBACK + # op op_sput_short FALLBACK + # op op_invoke_virtual FALLBACK + # op op_invoke_super FALLBACK + # op op_invoke_direct FALLBACK + # op op_invoke_static FALLBACK + # op op_invoke_interface FALLBACK + # op op_return_void_no_barrier FALLBACK + # op op_invoke_virtual_range FALLBACK + # op op_invoke_super_range FALLBACK + # op op_invoke_direct_range FALLBACK + # op op_invoke_static_range FALLBACK + # op op_invoke_interface_range FALLBACK + # op op_unused_79 FALLBACK + # op op_unused_7a FALLBACK + # op op_neg_int FALLBACK + # op op_not_int FALLBACK + # op op_neg_long FALLBACK + # op op_not_long FALLBACK + # op op_neg_float FALLBACK + # op op_neg_double FALLBACK + # op op_int_to_long FALLBACK + # op op_int_to_float FALLBACK + # op op_int_to_double FALLBACK + # op op_long_to_int FALLBACK + # op op_long_to_float FALLBACK + # op op_long_to_double FALLBACK + # op op_float_to_int FALLBACK + # op op_float_to_long FALLBACK + # op op_float_to_double FALLBACK + # op op_double_to_int FALLBACK + # op op_double_to_long FALLBACK + # op op_double_to_float FALLBACK + # op op_int_to_byte FALLBACK + # op op_int_to_char FALLBACK + # op op_int_to_short FALLBACK + # op op_add_int FALLBACK + # op op_sub_int FALLBACK + # op op_mul_int FALLBACK + # op op_div_int FALLBACK + # op op_rem_int FALLBACK + # op op_and_int FALLBACK + # op op_or_int FALLBACK + # op op_xor_int FALLBACK + # op op_shl_int FALLBACK + # op op_shr_int FALLBACK + # op op_ushr_int FALLBACK + # op op_add_long FALLBACK + # op op_sub_long FALLBACK + # op op_mul_long FALLBACK + # op op_div_long FALLBACK + # op op_rem_long FALLBACK + # op op_and_long FALLBACK + # op op_or_long FALLBACK + # op op_xor_long FALLBACK + # op op_shl_long 
FALLBACK + # op op_shr_long FALLBACK + # op op_ushr_long FALLBACK + # op op_add_float FALLBACK + # op op_sub_float FALLBACK + # op op_mul_float FALLBACK + # op op_div_float FALLBACK + # op op_rem_float FALLBACK + # op op_add_double FALLBACK + # op op_sub_double FALLBACK + # op op_mul_double FALLBACK + # op op_div_double FALLBACK + # op op_rem_double FALLBACK + # op op_add_int_2addr FALLBACK + # op op_sub_int_2addr FALLBACK + # op op_mul_int_2addr FALLBACK + # op op_div_int_2addr FALLBACK + # op op_rem_int_2addr FALLBACK + # op op_and_int_2addr FALLBACK + # op op_or_int_2addr FALLBACK + # op op_xor_int_2addr FALLBACK + # op op_shl_int_2addr FALLBACK + # op op_shr_int_2addr FALLBACK + # op op_ushr_int_2addr FALLBACK + # op op_add_long_2addr FALLBACK + # op op_sub_long_2addr FALLBACK + # op op_mul_long_2addr FALLBACK + # op op_div_long_2addr FALLBACK + # op op_rem_long_2addr FALLBACK + # op op_and_long_2addr FALLBACK + # op op_or_long_2addr FALLBACK + # op op_xor_long_2addr FALLBACK + # op op_shl_long_2addr FALLBACK + # op op_shr_long_2addr FALLBACK + # op op_ushr_long_2addr FALLBACK + # op op_add_float_2addr FALLBACK + # op op_sub_float_2addr FALLBACK + # op op_mul_float_2addr FALLBACK + # op op_div_float_2addr FALLBACK + # op op_rem_float_2addr FALLBACK + # op op_add_double_2addr FALLBACK + # op op_sub_double_2addr FALLBACK + # op op_mul_double_2addr FALLBACK + # op op_div_double_2addr FALLBACK + # op op_rem_double_2addr FALLBACK + # op op_add_int_lit16 FALLBACK + # op op_rsub_int FALLBACK + # op op_mul_int_lit16 FALLBACK + # op op_div_int_lit16 FALLBACK + # op op_rem_int_lit16 FALLBACK + # op op_and_int_lit16 FALLBACK + # op op_or_int_lit16 FALLBACK + # op op_xor_int_lit16 FALLBACK + # op op_add_int_lit8 FALLBACK + # op op_rsub_int_lit8 FALLBACK + # op op_mul_int_lit8 FALLBACK + # op op_div_int_lit8 FALLBACK + # op op_rem_int_lit8 FALLBACK + # op op_and_int_lit8 FALLBACK + # op op_or_int_lit8 FALLBACK + # op op_xor_int_lit8 FALLBACK + # op op_shl_int_lit8 FALLBACK 
+ # op op_shr_int_lit8 FALLBACK + # op op_ushr_int_lit8 FALLBACK + # op op_iget_quick FALLBACK + # op op_iget_wide_quick FALLBACK + # op op_iget_object_quick FALLBACK + # op op_iput_quick FALLBACK + # op op_iput_wide_quick FALLBACK + # op op_iput_object_quick FALLBACK + # op op_invoke_virtual_quick FALLBACK + # op op_invoke_virtual_range_quick FALLBACK + # op op_iput_boolean_quick FALLBACK + # op op_iput_byte_quick FALLBACK + # op op_iput_char_quick FALLBACK + # op op_iput_short_quick FALLBACK + # op op_iget_boolean_quick FALLBACK + # op op_iget_byte_quick FALLBACK + # op op_iget_char_quick FALLBACK + # op op_iget_short_quick FALLBACK + op op_invoke_lambda FALLBACK + # op op_unused_f4 FALLBACK + op op_capture_variable FALLBACK + op op_create_lambda FALLBACK + op op_liberate_variable FALLBACK + op op_box_lambda FALLBACK + op op_unbox_lambda FALLBACK + # op op_unused_fa FALLBACK + # op op_unused_fb FALLBACK + # op op_unused_fc FALLBACK + # op op_unused_fd FALLBACK + # op op_unused_fe FALLBACK + # op op_unused_ff FALLBACK op-end # common subroutines for asm diff --git a/runtime/interpreter/mterp/mips/binop.S b/runtime/interpreter/mterp/mips/binop.S index ce09da453a..66627e2719 100644 --- a/runtime/interpreter/mterp/mips/binop.S +++ b/runtime/interpreter/mterp/mips/binop.S @@ -7,8 +7,8 @@ * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. 
* * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int diff --git a/runtime/interpreter/mterp/mips/entry.S b/runtime/interpreter/mterp/mips/entry.S index cef08feaa0..5771a4f402 100644 --- a/runtime/interpreter/mterp/mips/entry.S +++ b/runtime/interpreter/mterp/mips/entry.S @@ -49,7 +49,7 @@ ExecuteMterpImpl: /* set up "named" registers */ move rSELF, a0 lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2) - addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to insns[] (i.e. - the dalivk byte code). + addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs. EAS2(rREFS, rFP, a0) # point to reference array in shadow frame lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc addu rPC, a1, CODEITEM_INSNS_OFFSET # Point to base of insns[] diff --git a/runtime/interpreter/mterp/mips64/alt_stub.S b/runtime/interpreter/mterp/mips64/alt_stub.S new file mode 100644 index 0000000000..bd76a1b464 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/alt_stub.S @@ -0,0 +1,14 @@ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (${opnum} * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. diff --git a/runtime/interpreter/mterp/mips64/bincmp.S b/runtime/interpreter/mterp/mips64/bincmp.S new file mode 100644 index 0000000000..aa5e74b3de --- /dev/null +++ b/runtime/interpreter/mterp/mips64/bincmp.S @@ -0,0 +1,32 @@ + /* + * Generic two-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-le" you would use "le". 
+ * + * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le + */ + /* if-cmp vA, vB, +CCCC */ + .extern MterpProfileBranch + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + b${condition}c a0, a1, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/binop.S b/runtime/interpreter/mterp/mips64/binop.S new file mode 100644 index 0000000000..fab48b73b3 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/binop.S @@ -0,0 +1,30 @@ +%default {"preinstr":"", "result":"a0", "chkzero":"0"} + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. 
+ * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if $chkzero + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + $preinstr # optional op + $instr # $result <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG $result, a4 # vAA <- $result + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/binop2addr.S b/runtime/interpreter/mterp/mips64/binop2addr.S new file mode 100644 index 0000000000..1ae73f51d4 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/binop2addr.S @@ -0,0 +1,30 @@ +%default {"preinstr":"", "result":"a0", "chkzero":"0"} + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if $chkzero + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + $preinstr # optional op + $instr # $result <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG $result, a2 # vA <- $result + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/binopLit16.S b/runtime/interpreter/mterp/mips64/binopLit16.S new file mode 100644 index 0000000000..925775824c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/binopLit16.S @@ -0,0 +1,28 @@ +%default {"preinstr":"", "result":"a0", "chkzero":"0"} + /* + * Generic 32-bit "lit16" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CCCC (a1). Useful for integer division and modulus. + * + * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, + * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 + */ + /* binop/lit16 vA, vB, #+CCCC */ + lh a1, 2(rPC) # a1 <- sign-extended CCCC + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + .if $chkzero + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + $preinstr # optional op + $instr # $result <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG $result, a2 # vA <- $result + GOTO_OPCODE v0 # jump to next instruction + diff --git a/runtime/interpreter/mterp/mips64/binopLit8.S b/runtime/interpreter/mterp/mips64/binopLit8.S new file mode 100644 index 0000000000..f4a0bba9b9 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/binopLit8.S @@ -0,0 +1,29 @@ +%default {"preinstr":"", "result":"a0", "chkzero":"0"} + /* + * Generic 32-bit "lit8" binary operation. 
Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if $chkzero + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + $preinstr # optional op + $instr # $result <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG $result, a2 # vAA <- $result + GOTO_OPCODE v0 # jump to next instruction + diff --git a/runtime/interpreter/mterp/mips64/binopWide.S b/runtime/interpreter/mterp/mips64/binopWide.S new file mode 100644 index 0000000000..732f0d60f9 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/binopWide.S @@ -0,0 +1,30 @@ +%default {"preinstr":"", "result":"a0", "chkzero":"0"} + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. 
+ * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if $chkzero + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + $preinstr # optional op + $instr # $result <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE $result, a4 # vAA <- $result + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/binopWide2addr.S b/runtime/interpreter/mterp/mips64/binopWide2addr.S new file mode 100644 index 0000000000..45d8d82960 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/binopWide2addr.S @@ -0,0 +1,30 @@ +%default {"preinstr":"", "result":"a0", "chkzero":"0"} + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if $chkzero + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + $preinstr # optional op + $instr # $result <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE $result, a2 # vA <- $result + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/entry.S b/runtime/interpreter/mterp/mips64/entry.S new file mode 100644 index 0000000000..ae6c26b706 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/entry.S @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Interpreter entry point. 
+ */ + + .set reorder + + .text + .global ExecuteMterpImpl + .type ExecuteMterpImpl, %function + .balign 16 +/* + * On entry: + * a0 Thread* self + * a1 code_item + * a2 ShadowFrame + * a3 JValue* result_register + * + */ +ExecuteMterpImpl: + .cfi_startproc + .cpsetup t9, t8, ExecuteMterpImpl + + .cfi_def_cfa sp, 0 + daddu sp, sp, -STACK_SIZE + .cfi_adjust_cfa_offset STACK_SIZE + + sd t8, STACK_OFFSET_GP(sp) + .cfi_rel_offset 28, STACK_OFFSET_GP + sd ra, STACK_OFFSET_RA(sp) + .cfi_rel_offset 31, STACK_OFFSET_RA + + sd s0, STACK_OFFSET_S0(sp) + .cfi_rel_offset 16, STACK_OFFSET_S0 + sd s1, STACK_OFFSET_S1(sp) + .cfi_rel_offset 17, STACK_OFFSET_S1 + sd s2, STACK_OFFSET_S2(sp) + .cfi_rel_offset 18, STACK_OFFSET_S2 + sd s3, STACK_OFFSET_S3(sp) + .cfi_rel_offset 19, STACK_OFFSET_S3 + sd s4, STACK_OFFSET_S4(sp) + .cfi_rel_offset 20, STACK_OFFSET_S4 + sd s5, STACK_OFFSET_S5(sp) + .cfi_rel_offset 21, STACK_OFFSET_S5 + + /* Remember the return register */ + sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2) + + /* Remember the code_item */ + sd a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2) + + /* set up "named" registers */ + move rSELF, a0 + daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET + lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2) + dlsa rREFS, v0, rFP, 2 + daddu rPC, a1, CODEITEM_INSNS_OFFSET + lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2) + dlsa rPC, v0, rPC, 1 + EXPORT_PC + + /* Starting ibase */ + REFRESH_IBASE + + /* start executing the instruction at rPC */ + FETCH_INST + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + /* NOTE: no fallthrough */ diff --git a/runtime/interpreter/mterp/mips64/fallback.S b/runtime/interpreter/mterp/mips64/fallback.S new file mode 100644 index 0000000000..560b994b08 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/fallback.S @@ -0,0 +1,2 @@ +/* Transfer stub to alternate interpreter */ + b MterpFallback diff --git a/runtime/interpreter/mterp/mips64/fbinop.S b/runtime/interpreter/mterp/mips64/fbinop.S new file mode 100644 index 0000000000..f19dd1c3d9 --- /dev/null +++ 
b/runtime/interpreter/mterp/mips64/fbinop.S @@ -0,0 +1,18 @@ +%default {} + /*: + * Generic 32-bit floating-point operation. + * + * For: add-float, sub-float, mul-float, div-float. + * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f0, a2 # f0 <- vBB + GET_VREG_FLOAT f1, a3 # f1 <- vCC + $instr # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/fbinop2addr.S b/runtime/interpreter/mterp/mips64/fbinop2addr.S new file mode 100644 index 0000000000..2e2cd7e8e9 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/fbinop2addr.S @@ -0,0 +1,17 @@ +%default {} + /*: + * Generic 32-bit "/2addr" floating-point operation. + * + * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr. + * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_FLOAT f0, a2 # f0 <- vA + GET_VREG_FLOAT f1, a3 # f1 <- vB + $instr # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/fbinopWide.S b/runtime/interpreter/mterp/mips64/fbinopWide.S new file mode 100644 index 0000000000..8915c9447c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/fbinopWide.S @@ -0,0 +1,18 @@ +%default {} + /*: + * Generic 64-bit floating-point operation. + * + * For: add-double, sub-double, mul-double, div-double. 
+ * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f0, a2 # f0 <- vBB + GET_VREG_DOUBLE f1, a3 # f1 <- vCC + $instr # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/fbinopWide2addr.S b/runtime/interpreter/mterp/mips64/fbinopWide2addr.S new file mode 100644 index 0000000000..a3f4eaa8cc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/fbinopWide2addr.S @@ -0,0 +1,17 @@ +%default {} + /*: + * Generic 64-bit "/2addr" floating-point operation. + * + * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr. + * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_DOUBLE f0, a2 # f0 <- vA + GET_VREG_DOUBLE f1, a3 # f1 <- vB + $instr # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/fcmp.S b/runtime/interpreter/mterp/mips64/fcmp.S new file mode 100644 index 0000000000..2e1a3e4c3d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/fcmp.S @@ -0,0 +1,32 @@ +%default {} + /* + * Compare two floating-point values. Puts 0, 1, or -1 into the + * destination register based on the results of the comparison. 
+ * + * For: cmpl-float, cmpg-float + */ + /* op vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f0, a2 # f0 <- vBB + GET_VREG_FLOAT f1, a3 # f1 <- vCC + cmp.eq.s f2, f0, f1 + li a0, 0 + bc1nez f2, 1f # done if vBB == vCC (ordered) + .if $gt_bias + cmp.lt.s f2, f0, f1 + li a0, -1 + bc1nez f2, 1f # done if vBB < vCC (ordered) + li a0, 1 # vBB > vCC or unordered + .else + cmp.lt.s f2, f1, f0 + li a0, 1 + bc1nez f2, 1f # done if vBB > vCC (ordered) + li a0, -1 # vBB < vCC or unordered + .endif +1: + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/fcmpWide.S b/runtime/interpreter/mterp/mips64/fcmpWide.S new file mode 100644 index 0000000000..2a3a341a3d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/fcmpWide.S @@ -0,0 +1,32 @@ +%default {} + /* + * Compare two floating-point values. Puts 0, 1, or -1 into the + * destination register based on the results of the comparison. 
+ * + * For: cmpl-double, cmpg-double + */ + /* op vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f0, a2 # f0 <- vBB + GET_VREG_DOUBLE f1, a3 # f1 <- vCC + cmp.eq.d f2, f0, f1 + li a0, 0 + bc1nez f2, 1f # done if vBB == vCC (ordered) + .if $gt_bias + cmp.lt.d f2, f0, f1 + li a0, -1 + bc1nez f2, 1f # done if vBB < vCC (ordered) + li a0, 1 # vBB > vCC or unordered + .else + cmp.lt.d f2, f1, f0 + li a0, 1 + bc1nez f2, 1f # done if vBB > vCC (ordered) + li a0, -1 # vBB < vCC or unordered + .endif +1: + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/fcvtFooter.S b/runtime/interpreter/mterp/mips64/fcvtFooter.S new file mode 100644 index 0000000000..06e9507817 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/fcvtFooter.S @@ -0,0 +1,18 @@ + /* + * Stores a specified register containing the result of conversion + * from or to a floating-point type and jumps to the next instruction. + * + * Expects a1 to contain the destination Dalvik register number. + * a1 is set up by fcvtHeader.S. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + * + * Note that this file can't be included after a break in other files + * and in those files its contents appear as a copy. + * See: float-to-int, float-to-long, double-to-int, double-to-long. 
+ */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG$suffix $valreg, a1 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/fcvtHeader.S b/runtime/interpreter/mterp/mips64/fcvtHeader.S new file mode 100644 index 0000000000..8742e42c39 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/fcvtHeader.S @@ -0,0 +1,15 @@ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG$suffix $valreg, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S new file mode 100644 index 0000000000..14d5fe01f5 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/footer.S @@ -0,0 +1,172 @@ +/* + * We've detected a condition that will result in an exception, but the exception + * has not yet been thrown. Just bail out to the reference interpreter to deal with it. + * TUNING: for consistency, we may want to just go ahead and handle these here. 
+ */ + + .extern MterpLogDivideByZeroException +common_errDivideByZero: + EXPORT_PC +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpLogDivideByZeroException +#endif + b MterpCommonFallback + + .extern MterpLogArrayIndexException +common_errArrayIndex: + EXPORT_PC +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpLogArrayIndexException +#endif + b MterpCommonFallback + + .extern MterpLogNullObjectException +common_errNullObject: + EXPORT_PC +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpLogNullObjectException +#endif + b MterpCommonFallback + +/* + * If we're here, something is out of the ordinary. If there is a pending + * exception, handle it. Otherwise, roll back and retry with the reference + * interpreter. + */ +MterpPossibleException: + ld a0, THREAD_EXCEPTION_OFFSET(rSELF) + beqzc a0, MterpFallback # If not, fall back to reference interpreter. + /* intentional fallthrough - handle pending exception. */ +/* + * On return from a runtime helper routine, we've found a pending exception. + * Can we handle it here - or need to bail out to caller? + * + */ + .extern MterpHandleException + .extern MterpShouldSwitchInterpreters +MterpException: + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpHandleException # (self, shadow_frame) + beqzc v0, MterpExceptionReturn # no local catch, back to caller. + ld a0, OFF_FP_CODE_ITEM(rFP) + lwu a1, OFF_FP_DEX_PC(rFP) + REFRESH_IBASE + daddu rPC, a0, CODEITEM_INSNS_OFFSET + dlsa rPC, a1, rPC, 1 # generate new dex_pc_ptr + /* Do we need to switch interpreters? */ + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + /* resume execution at catch block */ + EXPORT_PC + FETCH_INST + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + /* NOTE: no fallthrough */ + +/* + * Check for suspend check request. Assumes rINST already loaded, rPC advanced and + * still needs to get the opcode and branch to it, and flags are in ra. 
+ */ + .extern MterpSuspendCheck +MterpCheckSuspendAndContinue: + REFRESH_IBASE + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + bnez ra, check1 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction +check1: + EXPORT_PC + move a0, rSELF + jal MterpSuspendCheck # (self) + bnezc v0, MterpFallback # Something in the environment changed, switch interpreters + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* + * On-stack replacement has happened, and now we've returned from the compiled method. + */ +MterpOnStackReplacement: +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST # rINST contains offset + jal MterpLogOSR +#endif + li v0, 1 # Signal normal return + b MterpDone + +/* + * Bail out to reference interpreter. + */ + .extern MterpLogFallback +MterpFallback: + EXPORT_PC +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpLogFallback +#endif +MterpCommonFallback: + li v0, 0 # signal retry with reference interpreter. + b MterpDone + +/* + * We pushed some registers on the stack in ExecuteMterpImpl, then saved + * SP and RA. Here we restore SP, restore the registers, and then restore + * RA to PC. + * + * On entry: + * uint32_t* rFP (should still be live, pointer to base of vregs) + */ +MterpExceptionReturn: + li v0, 1 # signal return to caller. + b MterpDone +/* + * Returned value is expected in a0 and if it's not 64-bit, the 32 most + * significant bits of a0 must be 0. + */ +MterpReturn: + ld a2, OFF_FP_RESULT_REGISTER(rFP) + lw ra, THREAD_FLAGS_OFFSET(rSELF) + sd a0, 0(a2) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, check2 + jal MterpSuspendCheck # (self) +check2: + li v0, 1 # signal return to caller. 
+MterpDone: + ld s5, STACK_OFFSET_S5(sp) + .cfi_restore 21 + ld s4, STACK_OFFSET_S4(sp) + .cfi_restore 20 + ld s3, STACK_OFFSET_S3(sp) + .cfi_restore 19 + ld s2, STACK_OFFSET_S2(sp) + .cfi_restore 18 + ld s1, STACK_OFFSET_S1(sp) + .cfi_restore 17 + ld s0, STACK_OFFSET_S0(sp) + .cfi_restore 16 + + ld ra, STACK_OFFSET_RA(sp) + .cfi_restore 31 + + ld t8, STACK_OFFSET_GP(sp) + .cpreturn + .cfi_restore 28 + + .set noreorder + jr ra + daddu sp, sp, STACK_SIZE + .cfi_adjust_cfa_offset -STACK_SIZE + + .cfi_endproc + .size ExecuteMterpImpl, .-ExecuteMterpImpl diff --git a/runtime/interpreter/mterp/mips64/header.S b/runtime/interpreter/mterp/mips64/header.S new file mode 100644 index 0000000000..dd0fbe0057 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/header.S @@ -0,0 +1,278 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <machine/regdef.h> + +/* TODO: add the missing file and use its FP register definitions. */ +/* #include <machine/fpregdef.h> */ +/* FP register definitions */ +#define f0 $$f0 +#define f1 $$f1 +#define f2 $$f2 +#define f3 $$f3 +#define f12 $$f12 +#define f13 $$f13 + +/* + * It looks like the GNU assembler currently does not support the blec and bgtc + * idioms, which should translate into bgec and bltc respectively with swapped + * left and right register operands. + * TODO: remove these macros when the assembler is fixed. 
+ */ +.macro blec lreg, rreg, target + bgec \rreg, \lreg, \target +.endm +.macro bgtc lreg, rreg, target + bltc \rreg, \lreg, \target +.endm + +/* +Mterp and MIPS64 notes: + +The following registers have fixed assignments: + + reg nick purpose + s0 rPC interpreted program counter, used for fetching instructions + s1 rFP interpreted frame pointer, used for accessing locals and args + s2 rSELF self (Thread) pointer + s3 rINST first 16-bit code unit of current instruction + s4 rIBASE interpreted instruction base pointer, used for computed goto + s5 rREFS base of object references in shadow frame (ideally, we'll get rid of this later). +*/ + +/* During bringup, we'll use the shadow frame model instead of rFP */ +/* single-purpose registers, given names for clarity */ +#define rPC s0 +#define rFP s1 +#define rSELF s2 +#define rINST s3 +#define rIBASE s4 +#define rREFS s5 + +/* + * This is a #include, not a %include, because we want the C pre-processor + * to expand the macros into assembler assignment statements. + */ +#include "asm_support.h" + +/* + * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So, + * to access other shadow frame fields, we need to use a backwards offset. Define those here. + */ +#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET) +#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET) +#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET) +#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET) +#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET) +#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET) +#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET) +#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET) +#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET) + +#define MTERP_PROFILE_BRANCHES 1 +#define MTERP_LOGGING 0 + +/* + * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. 
Must + * be done *before* something throws. + * + * It's okay to do this more than once. + * + * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped + * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction + * offset into the code_items_[] array. For effiency, we will "export" the + * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC + * to convert to a dex pc when needed. + */ +.macro EXPORT_PC + sd rPC, OFF_FP_DEX_PC_PTR(rFP) +.endm + +/* + * Refresh handler table. + */ +.macro REFRESH_IBASE + ld rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) +.endm + +/* + * Fetch the next instruction from rPC into rINST. Does not advance rPC. + */ +.macro FETCH_INST + lhu rINST, 0(rPC) +.endm + +/* Advance rPC by some number of code units. */ +.macro ADVANCE count + daddu rPC, rPC, (\count) * 2 +.endm + +/* + * Fetch the next instruction from the specified offset. Advances rPC + * to point to the next instruction. + * + * This must come AFTER anything that can throw an exception, or the + * exception catch may miss. (This also implies that it must come after + * EXPORT_PC.) + */ +.macro FETCH_ADVANCE_INST count + ADVANCE \count + FETCH_INST +.endm + +/* + * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load + * rINST ahead of possible exception point. Be sure to manually advance rPC + * later. + */ +.macro PREFETCH_INST count + lhu rINST, ((\count) * 2)(rPC) +.endm + +/* + * Put the instruction's opcode field into the specified register. + */ +.macro GET_INST_OPCODE reg + and \reg, rINST, 255 +.endm + +/* + * Begin executing the opcode in _reg. + */ +.macro GOTO_OPCODE reg + .set noat + sll AT, \reg, 7 + daddu AT, rIBASE, AT + jic AT, 0 + .set at +.endm + +/* + * Get/set the 32-bit value from a Dalvik register. + * Note, GET_VREG does sign extension to 64 bits while + * GET_VREG_U does zero extension to 64 bits. 
+ * One is useful for arithmetic while the other is + * useful for storing the result value as 64-bit. + */ +.macro GET_VREG reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lw \reg, 0(AT) + .set at +.endm +.macro GET_VREG_U reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lwu \reg, 0(AT) + .set at +.endm +.macro GET_VREG_FLOAT reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lwc1 \reg, 0(AT) + .set at +.endm +.macro SET_VREG reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + sw \reg, 0(AT) + dlsa AT, \vreg, rREFS, 2 + sw zero, 0(AT) + .set at +.endm +.macro SET_VREG_OBJECT reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + sw \reg, 0(AT) + dlsa AT, \vreg, rREFS, 2 + sw \reg, 0(AT) + .set at +.endm +.macro SET_VREG_FLOAT reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + swc1 \reg, 0(AT) + dlsa AT, \vreg, rREFS, 2 + sw zero, 0(AT) + .set at +.endm + +/* + * Get/set the 64-bit value from a Dalvik register. + * Avoid unaligned memory accesses. + * Note, SET_VREG_WIDE clobbers the register containing the value being stored. + * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number. + */ +.macro GET_VREG_WIDE reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lw \reg, 0(AT) + lw AT, 4(AT) + dinsu \reg, AT, 32, 32 + .set at +.endm +.macro GET_VREG_DOUBLE reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lwc1 \reg, 0(AT) + lw AT, 4(AT) + mthc1 AT, \reg + .set at +.endm +.macro SET_VREG_WIDE reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + sw \reg, 0(AT) + drotr32 \reg, \reg, 0 + sw \reg, 4(AT) + dlsa AT, \vreg, rREFS, 2 + sw zero, 0(AT) + sw zero, 4(AT) + .set at +.endm +.macro SET_VREG_DOUBLE reg, vreg + .set noat + dlsa AT, \vreg, rREFS, 2 + sw zero, 0(AT) + sw zero, 4(AT) + dlsa AT, \vreg, rFP, 2 + swc1 \reg, 0(AT) + mfhc1 \vreg, \reg + sw \vreg, 4(AT) + .set at +.endm + +/* + * On-stack offsets for spilling/unspilling callee-saved registers + * and the frame size. 
+ */ +#define STACK_OFFSET_RA 0 +#define STACK_OFFSET_GP 8 +#define STACK_OFFSET_S0 16 +#define STACK_OFFSET_S1 24 +#define STACK_OFFSET_S2 32 +#define STACK_OFFSET_S3 40 +#define STACK_OFFSET_S4 48 +#define STACK_OFFSET_S5 56 +#define STACK_SIZE 64 + +/* Constants for float/double_to_int/long conversions */ +#define INT_MIN 0x80000000 +#define INT_MIN_AS_FLOAT 0xCF000000 +#define INT_MIN_AS_DOUBLE 0xC1E0000000000000 +#define LONG_MIN 0x8000000000000000 +#define LONG_MIN_AS_FLOAT 0xDF000000 +#define LONG_MIN_AS_DOUBLE 0xC3E0000000000000 diff --git a/runtime/interpreter/mterp/mips64/invoke.S b/runtime/interpreter/mterp/mips64/invoke.S new file mode 100644 index 0000000000..be647b618b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/invoke.S @@ -0,0 +1,20 @@ +%default { "helper":"UndefinedInvokeHandler" } + /* + * Generic invoke handler wrapper. + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern $helper + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal $helper + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 diff --git a/runtime/interpreter/mterp/mips64/op_add_double.S b/runtime/interpreter/mterp/mips64/op_add_double.S new file mode 100644 index 0000000000..1520e325f7 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_add_double.S @@ -0,0 +1 @@ +%include "mips64/fbinopWide.S" {"instr":"add.d f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_add_double_2addr.S b/runtime/interpreter/mterp/mips64/op_add_double_2addr.S new file mode 100644 index 0000000000..c14382ef20 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_add_double_2addr.S @@ -0,0 +1 @@ +%include "mips64/fbinopWide2addr.S" {"instr":"add.d f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_add_float.S 
b/runtime/interpreter/mterp/mips64/op_add_float.S new file mode 100644 index 0000000000..c6ed558dc3 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_add_float.S @@ -0,0 +1 @@ +%include "mips64/fbinop.S" {"instr":"add.s f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_add_float_2addr.S b/runtime/interpreter/mterp/mips64/op_add_float_2addr.S new file mode 100644 index 0000000000..4c20547b22 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_add_float_2addr.S @@ -0,0 +1 @@ +%include "mips64/fbinop2addr.S" {"instr":"add.s f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_add_int.S b/runtime/interpreter/mterp/mips64/op_add_int.S new file mode 100644 index 0000000000..6e569de71a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_add_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"addu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_add_int_2addr.S b/runtime/interpreter/mterp/mips64/op_add_int_2addr.S new file mode 100644 index 0000000000..2a84124a3a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_add_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"addu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit16.S b/runtime/interpreter/mterp/mips64/op_add_int_lit16.S new file mode 100644 index 0000000000..94b053bba3 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_add_int_lit16.S @@ -0,0 +1 @@ +%include "mips64/binopLit16.S" {"instr":"addu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_add_int_lit8.S b/runtime/interpreter/mterp/mips64/op_add_int_lit8.S new file mode 100644 index 0000000000..3b6d734723 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_add_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"addu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_add_long.S b/runtime/interpreter/mterp/mips64/op_add_long.S new file mode 100644 index 0000000000..c8d702f29f --- /dev/null +++ 
b/runtime/interpreter/mterp/mips64/op_add_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"daddu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_add_long_2addr.S b/runtime/interpreter/mterp/mips64/op_add_long_2addr.S new file mode 100644 index 0000000000..928ff54565 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_add_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"daddu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_aget.S b/runtime/interpreter/mterp/mips64/op_aget.S new file mode 100644 index 0000000000..0472a0616b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aget.S @@ -0,0 +1,29 @@ +%default { "load":"lw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" } + /* + * Array get, 32 bits or less. vAA <- vBB[vCC]. + * + * for: aget, aget-boolean, aget-byte, aget-char, aget-short + * + * NOTE: assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if $shift + # [d]lsa does not support shift count of 0. 
+ dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + $load a2, $data_offset(a0) # a2 <- vBB[vCC] + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a2, a4 # vAA <- a2 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_aget_boolean.S b/runtime/interpreter/mterp/mips64/op_aget_boolean.S new file mode 100644 index 0000000000..d5be01b7c5 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aget_boolean.S @@ -0,0 +1 @@ +%include "mips64/op_aget.S" { "load":"lbu", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" } diff --git a/runtime/interpreter/mterp/mips64/op_aget_byte.S b/runtime/interpreter/mterp/mips64/op_aget_byte.S new file mode 100644 index 0000000000..084de8d4df --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aget_byte.S @@ -0,0 +1 @@ +%include "mips64/op_aget.S" { "load":"lb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" } diff --git a/runtime/interpreter/mterp/mips64/op_aget_char.S b/runtime/interpreter/mterp/mips64/op_aget_char.S new file mode 100644 index 0000000000..6c99ed52ad --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aget_char.S @@ -0,0 +1 @@ +%include "mips64/op_aget.S" { "load":"lhu", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" } diff --git a/runtime/interpreter/mterp/mips64/op_aget_object.S b/runtime/interpreter/mterp/mips64/op_aget_object.S new file mode 100644 index 0000000000..6374a05e7b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aget_object.S @@ -0,0 +1,21 @@ + /* + * Array object get. vAA <- vBB[vCC]. 
+ * + * for: aget-object + */ + /* op vAA, vBB, vCC */ + .extern artAGetObjectFromMterp + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + EXPORT_PC + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + jal artAGetObjectFromMterp # (array, index) + ld a1, THREAD_EXCEPTION_OFFSET(rSELF) + srl a4, rINST, 8 # a4 <- AA + PREFETCH_INST 2 + bnez a1, MterpException + SET_VREG_OBJECT v0, a4 # vAA <- v0 + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_aget_short.S b/runtime/interpreter/mterp/mips64/op_aget_short.S new file mode 100644 index 0000000000..0158b0a1a1 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aget_short.S @@ -0,0 +1 @@ +%include "mips64/op_aget.S" { "load":"lh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" } diff --git a/runtime/interpreter/mterp/mips64/op_aget_wide.S b/runtime/interpreter/mterp/mips64/op_aget_wide.S new file mode 100644 index 0000000000..0945acae5a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aget_wide.S @@ -0,0 +1,21 @@ + /* + * Array get, 64 bits. vAA <- vBB[vCC]. 
+ * + */ + /* aget-wide vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + lw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0) + lw a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) + dinsu a2, a3, 32, 32 # a2 <- vBB[vCC] + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a2, a4 # vAA <- a2 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_and_int.S b/runtime/interpreter/mterp/mips64/op_and_int.S new file mode 100644 index 0000000000..f0792a8351 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_and_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"and a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_and_int_2addr.S b/runtime/interpreter/mterp/mips64/op_and_int_2addr.S new file mode 100644 index 0000000000..08dc615518 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_and_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"and a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit16.S b/runtime/interpreter/mterp/mips64/op_and_int_lit16.S new file mode 100644 index 0000000000..65d28ad20c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_and_int_lit16.S @@ -0,0 +1 @@ +%include "mips64/binopLit16.S" {"instr":"and a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_and_int_lit8.S b/runtime/interpreter/mterp/mips64/op_and_int_lit8.S new file mode 100644 index 0000000000..ab84bb7ce2 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_and_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"and 
a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_and_long.S b/runtime/interpreter/mterp/mips64/op_and_long.S new file mode 100644 index 0000000000..e383ba00ca --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_and_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"and a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_and_long_2addr.S b/runtime/interpreter/mterp/mips64/op_and_long_2addr.S new file mode 100644 index 0000000000..f863bb9275 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_and_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"and a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_aput.S b/runtime/interpreter/mterp/mips64/op_aput.S new file mode 100644 index 0000000000..9bfda97d05 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aput.S @@ -0,0 +1,29 @@ +%default { "store":"sw", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" } + /* + * Array put, 32 bits or less. vBB[vCC] <- vAA. + * + * for: aput, aput-boolean, aput-byte, aput-char, aput-short + * + * NOTE: this assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if $shift + # [d]lsa does not support shift count of 0. 
+ dlsa a0, a1, a0, $shift # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_VREG a2, a4 # a2 <- vAA + GET_INST_OPCODE v0 # extract opcode from rINST + $store a2, $data_offset(a0) # vBB[vCC] <- a2 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_aput_boolean.S b/runtime/interpreter/mterp/mips64/op_aput_boolean.S new file mode 100644 index 0000000000..6707a1f11d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aput_boolean.S @@ -0,0 +1 @@ +%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" } diff --git a/runtime/interpreter/mterp/mips64/op_aput_byte.S b/runtime/interpreter/mterp/mips64/op_aput_byte.S new file mode 100644 index 0000000000..7b9ce48379 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aput_byte.S @@ -0,0 +1 @@ +%include "mips64/op_aput.S" { "store":"sb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" } diff --git a/runtime/interpreter/mterp/mips64/op_aput_char.S b/runtime/interpreter/mterp/mips64/op_aput_char.S new file mode 100644 index 0000000000..82bc8f7818 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aput_char.S @@ -0,0 +1 @@ +%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" } diff --git a/runtime/interpreter/mterp/mips64/op_aput_object.S b/runtime/interpreter/mterp/mips64/op_aput_object.S new file mode 100644 index 0000000000..b132456a18 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aput_object.S @@ -0,0 +1,14 @@ + /* + * Store an object into an array. vBB[vCC] <- vAA. 
+ */ + /* op vAA, vBB, vCC */ + .extern MterpAputObject + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + jal MterpAputObject + beqzc v0, MterpPossibleException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_aput_short.S b/runtime/interpreter/mterp/mips64/op_aput_short.S new file mode 100644 index 0000000000..a7af2945b1 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aput_short.S @@ -0,0 +1 @@ +%include "mips64/op_aput.S" { "store":"sh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" } diff --git a/runtime/interpreter/mterp/mips64/op_aput_wide.S b/runtime/interpreter/mterp/mips64/op_aput_wide.S new file mode 100644 index 0000000000..a1d7a3b51e --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_aput_wide.S @@ -0,0 +1,21 @@ + /* + * Array put, 64 bits. vBB[vCC] <- vAA. + * + */ + /* aput-wide vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + GET_VREG_WIDE a2, a4 # a2 <- vAA + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + sw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0) + dsrl32 a2, a2, 0 + sw a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) # vBB[vCC] <- a2 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_array_length.S b/runtime/interpreter/mterp/mips64/op_array_length.S new file mode 100644 index 0000000000..2d9e172d18 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_array_length.S 
@@ -0,0 +1,12 @@ + /* + * Return the length of an array. + */ + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a0, a1 # a0 <- vB (object ref) + ext a2, rINST, 8, 4 # a2 <- A + beqz a0, common_errNullObject # yup, fail + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- array length + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a3, a2 # vB <- length + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_check_cast.S b/runtime/interpreter/mterp/mips64/op_check_cast.S new file mode 100644 index 0000000000..472595d824 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_check_cast.S @@ -0,0 +1,17 @@ + /* + * Check to see if a cast from one class to another is allowed. + */ + /* check-cast vAA, class//BBBB */ + .extern MterpCheckCast + EXPORT_PC + lhu a0, 2(rPC) # a0 <- BBBB + srl a1, rINST, 8 # a1 <- AA + dlsa a1, a1, rFP, 2 # a1 <- &object + ld a2, OFF_FP_METHOD(rFP) # a2 <- method + move a3, rSELF # a3 <- self + jal MterpCheckCast # (index, &obj, method, self) + PREFETCH_INST 2 + bnez v0, MterpPossibleException + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_cmp_long.S b/runtime/interpreter/mterp/mips64/op_cmp_long.S new file mode 100644 index 0000000000..6e9376cfab --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_cmp_long.S @@ -0,0 +1,13 @@ + /* cmp-long vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + slt a2, a0, a1 + slt a0, a1, a0 + subu a0, a0, a2 + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- result + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_double.S b/runtime/interpreter/mterp/mips64/op_cmpg_double.S 
new file mode 100644 index 0000000000..a8e2ef9867 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_cmpg_double.S @@ -0,0 +1 @@ +%include "mips64/fcmpWide.S" {"gt_bias":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_cmpg_float.S b/runtime/interpreter/mterp/mips64/op_cmpg_float.S new file mode 100644 index 0000000000..0c93eac7de --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_cmpg_float.S @@ -0,0 +1 @@ +%include "mips64/fcmp.S" {"gt_bias":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_double.S b/runtime/interpreter/mterp/mips64/op_cmpl_double.S new file mode 100644 index 0000000000..9111b067f6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_cmpl_double.S @@ -0,0 +1 @@ +%include "mips64/fcmpWide.S" {"gt_bias":"0"} diff --git a/runtime/interpreter/mterp/mips64/op_cmpl_float.S b/runtime/interpreter/mterp/mips64/op_cmpl_float.S new file mode 100644 index 0000000000..b047451842 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_cmpl_float.S @@ -0,0 +1 @@ +%include "mips64/fcmp.S" {"gt_bias":"0"} diff --git a/runtime/interpreter/mterp/mips64/op_const.S b/runtime/interpreter/mterp/mips64/op_const.S new file mode 100644 index 0000000000..4b0d69b763 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const.S @@ -0,0 +1,9 @@ + /* const vAA, #+BBBBbbbb */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- bbbb (low) + lh a1, 4(rPC) # a1 <- BBBB (high) + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + ins a0, a1, 16, 16 # a0 = BBBBbbbb + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- +BBBBbbbb + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_16.S b/runtime/interpreter/mterp/mips64/op_const_16.S new file mode 100644 index 0000000000..51e68a7df7 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_16.S @@ -0,0 +1,7 @@ + /* const/16 vAA, #+BBBB */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- sign-extended BBBB + FETCH_ADVANCE_INST 2 # 
advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- +BBBB + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_4.S b/runtime/interpreter/mterp/mips64/op_const_4.S new file mode 100644 index 0000000000..0a58bff7b7 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_4.S @@ -0,0 +1,8 @@ + /* const/4 vA, #+B */ + ext a2, rINST, 8, 4 # a2 <- A + seh a0, rINST # sign extend B in rINST + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + sra a0, a0, 12 # shift B into its final position + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- +B + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_class.S b/runtime/interpreter/mterp/mips64/op_const_class.S new file mode 100644 index 0000000000..adf79df38e --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_class.S @@ -0,0 +1,13 @@ + /* const/class vAA, Class//BBBB */ + .extern MterpConstClass + EXPORT_PC + lhu a0, 2(rPC) # a0 <- BBBB + srl a1, rINST, 8 # a1 <- AA + daddu a2, rFP, OFF_FP_SHADOWFRAME + move a3, rSELF + jal MterpConstClass # (index, tgt_reg, shadow_frame, self) + PREFETCH_INST 2 # load rINST + bnez v0, MterpPossibleException # let reference interpreter deal with it. 
+ ADVANCE 2 # advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_high16.S b/runtime/interpreter/mterp/mips64/op_const_high16.S new file mode 100644 index 0000000000..43effb6f60 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_high16.S @@ -0,0 +1,8 @@ + /* const/high16 vAA, #+BBBB0000 */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- BBBB + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + sll a0, a0, 16 # a0 <- BBBB0000 + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- +BBBB0000 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_string.S b/runtime/interpreter/mterp/mips64/op_const_string.S new file mode 100644 index 0000000000..4684c11854 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_string.S @@ -0,0 +1,13 @@ + /* const/string vAA, String//BBBB */ + .extern MterpConstString + EXPORT_PC + lhu a0, 2(rPC) # a0 <- BBBB + srl a1, rINST, 8 # a1 <- AA + daddu a2, rFP, OFF_FP_SHADOWFRAME + move a3, rSELF + jal MterpConstString # (index, tgt_reg, shadow_frame, self) + PREFETCH_INST 2 # load rINST + bnez v0, MterpPossibleException # let reference interpreter deal with it. 
+ ADVANCE 2 # advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S b/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S new file mode 100644 index 0000000000..47f2101c88 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_string_jumbo.S @@ -0,0 +1,15 @@ + /* const/string vAA, String//BBBBBBBB */ + .extern MterpConstString + EXPORT_PC + lh a0, 2(rPC) # a0 <- bbbb (low) + lh a4, 4(rPC) # a4 <- BBBB (high) + srl a1, rINST, 8 # a1 <- AA + ins a0, a4, 16, 16 # a0 <- BBBBbbbb + daddu a2, rFP, OFF_FP_SHADOWFRAME + move a3, rSELF + jal MterpConstString # (index, tgt_reg, shadow_frame, self) + PREFETCH_INST 3 # load rINST + bnez v0, MterpPossibleException # let reference interpreter deal with it. + ADVANCE 3 # advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_wide.S b/runtime/interpreter/mterp/mips64/op_const_wide.S new file mode 100644 index 0000000000..f7eaf7c231 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_wide.S @@ -0,0 +1,13 @@ + /* const-wide vAA, #+HHHHhhhhBBBBbbbb */ + srl a4, rINST, 8 # a4 <- AA + lh a0, 2(rPC) # a0 <- bbbb (low) + lh a1, 4(rPC) # a1 <- BBBB (low middle) + lh a2, 6(rPC) # a2 <- hhhh (high middle) + lh a3, 8(rPC) # a3 <- HHHH (high) + FETCH_ADVANCE_INST 5 # advance rPC, load rINST + ins a0, a1, 16, 16 # a0 = BBBBbbbb + ins a2, a3, 16, 16 # a2 = HHHHhhhh + dinsu a0, a2, 32, 32 # a0 = HHHHhhhhBBBBbbbb + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- +HHHHhhhhBBBBbbbb + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_16.S b/runtime/interpreter/mterp/mips64/op_const_wide_16.S new file mode 100644 index 0000000000..3a70937973 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_wide_16.S @@ -0,0 +1,7 
@@ + /* const-wide/16 vAA, #+BBBB */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- sign-extended BBBB + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- +BBBB + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_32.S b/runtime/interpreter/mterp/mips64/op_const_wide_32.S new file mode 100644 index 0000000000..867197ce13 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_wide_32.S @@ -0,0 +1,9 @@ + /* const-wide/32 vAA, #+BBBBbbbb */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- bbbb (low) + lh a1, 4(rPC) # a1 <- BBBB (high) + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + ins a0, a1, 16, 16 # a0 = BBBBbbbb + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- +BBBBbbbb + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_const_wide_high16.S b/runtime/interpreter/mterp/mips64/op_const_wide_high16.S new file mode 100644 index 0000000000..d741631bcb --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_const_wide_high16.S @@ -0,0 +1,8 @@ + /* const-wide/high16 vAA, #+BBBB000000000000 */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- BBBB + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + dsll32 a0, a0, 16 # a0 <- BBBB000000000000 + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- +BBBB000000000000 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_div_double.S b/runtime/interpreter/mterp/mips64/op_div_double.S new file mode 100644 index 0000000000..44998f0c29 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_div_double.S @@ -0,0 +1 @@ +%include "mips64/fbinopWide.S" {"instr":"div.d f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_div_double_2addr.S b/runtime/interpreter/mterp/mips64/op_div_double_2addr.S new file mode 100644 index 
0000000000..396af798f6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_div_double_2addr.S @@ -0,0 +1 @@ +%include "mips64/fbinopWide2addr.S" {"instr":"div.d f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_div_float.S b/runtime/interpreter/mterp/mips64/op_div_float.S new file mode 100644 index 0000000000..7b09d52f02 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_div_float.S @@ -0,0 +1 @@ +%include "mips64/fbinop.S" {"instr":"div.s f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_div_float_2addr.S b/runtime/interpreter/mterp/mips64/op_div_float_2addr.S new file mode 100644 index 0000000000..e74fddae6d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_div_float_2addr.S @@ -0,0 +1 @@ +%include "mips64/fbinop2addr.S" {"instr":"div.s f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_div_int.S b/runtime/interpreter/mterp/mips64/op_div_int.S new file mode 100644 index 0000000000..fb04acbff8 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_div_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"div a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_div_int_2addr.S b/runtime/interpreter/mterp/mips64/op_div_int_2addr.S new file mode 100644 index 0000000000..db29b844fb --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_div_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"div a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit16.S b/runtime/interpreter/mterp/mips64/op_div_int_lit16.S new file mode 100644 index 0000000000..e903ddee2c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_div_int_lit16.S @@ -0,0 +1 @@ +%include "mips64/binopLit16.S" {"instr":"div a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_div_int_lit8.S b/runtime/interpreter/mterp/mips64/op_div_int_lit8.S new file mode 100644 index 0000000000..055960546f --- /dev/null +++ 
b/runtime/interpreter/mterp/mips64/op_div_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"div a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_div_long.S b/runtime/interpreter/mterp/mips64/op_div_long.S new file mode 100644 index 0000000000..01fc2b281a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_div_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_div_long_2addr.S b/runtime/interpreter/mterp/mips64/op_div_long_2addr.S new file mode 100644 index 0000000000..9627ab8a24 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_div_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"ddiv a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_double_to_float.S b/runtime/interpreter/mterp/mips64/op_double_to_float.S new file mode 100644 index 0000000000..2b2acee591 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_double_to_float.S @@ -0,0 +1,8 @@ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. + */ +%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" } + cvt.s.d f0, f0 +%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" } diff --git a/runtime/interpreter/mterp/mips64/op_double_to_int.S b/runtime/interpreter/mterp/mips64/op_double_to_int.S new file mode 100644 index 0000000000..aa2cbcad38 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_double_to_int.S @@ -0,0 +1,23 @@ +%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" } + /* + * TODO: simplify this when the MIPS64R6 emulator + * supports NAN2008=1. 
+ */ + dli t0, INT_MIN_AS_DOUBLE + dmtc1 t0, f1 + cmp.le.d f1, f1, f0 + bc1nez f1, .L${opcode}_trunc + cmp.eq.d f1, f0, f0 + li t0, INT_MIN + mfc1 t1, f1 + and t0, t0, t1 + b .L${opcode}_done +%break +.L${opcode}_trunc: + trunc.w.d f0, f0 + mfc1 t0, f0 +.L${opcode}_done: + /* Can't include fcvtFooter.S after break */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG t0, a1 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_double_to_long.S b/runtime/interpreter/mterp/mips64/op_double_to_long.S new file mode 100644 index 0000000000..777cfeb6c8 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_double_to_long.S @@ -0,0 +1,23 @@ +%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" } + /* + * TODO: simplify this when the MIPS64R6 emulator + * supports NAN2008=1. + */ + dli t0, LONG_MIN_AS_DOUBLE + dmtc1 t0, f1 + cmp.le.d f1, f1, f0 + bc1nez f1, .L${opcode}_trunc + cmp.eq.d f1, f0, f0 + dli t0, LONG_MIN + mfc1 t1, f1 + and t0, t0, t1 + b .L${opcode}_done +%break +.L${opcode}_trunc: + trunc.l.d f0, f0 + dmfc1 t0, f0 +.L${opcode}_done: + /* Can't include fcvtFooter.S after break */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE t0, a1 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_fill_array_data.S b/runtime/interpreter/mterp/mips64/op_fill_array_data.S new file mode 100644 index 0000000000..c90f0b90ad --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_fill_array_data.S @@ -0,0 +1,14 @@ + /* fill-array-data vAA, +BBBBBBBB */ + .extern MterpFillArrayData + EXPORT_PC + lh a1, 2(rPC) # a1 <- bbbb (lo) + lh a0, 4(rPC) # a0 <- BBBB (hi) + srl a3, rINST, 8 # a3 <- AA + ins a1, a0, 16, 16 # a1 <- BBBBbbbb + GET_VREG_U a0, a3 # a0 <- vAA (array object) + dlsa a1, a1, rPC, 1 # a1 <- PC + BBBBbbbb*2 (array data off.) + jal MterpFillArrayData # (obj, payload) + beqzc v0, MterpPossibleException # exception? 
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array.S b/runtime/interpreter/mterp/mips64/op_filled_new_array.S new file mode 100644 index 0000000000..35f55c27a6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_filled_new_array.S @@ -0,0 +1,18 @@ +%default { "helper":"MterpFilledNewArray" } + /* + * Create a new array with elements filled from registers. + * + * for: filled-new-array, filled-new-array/range + */ + /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */ + .extern $helper + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rSELF + jal $helper + beqzc v0, MterpPossibleException + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S b/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S new file mode 100644 index 0000000000..a4e18f68d6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_filled_new_array_range.S @@ -0,0 +1 @@ +%include "mips64/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" } diff --git a/runtime/interpreter/mterp/mips64/op_float_to_double.S b/runtime/interpreter/mterp/mips64/op_float_to_double.S new file mode 100644 index 0000000000..6accfeeff6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_float_to_double.S @@ -0,0 +1,8 @@ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. 
+ */ +%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" } + cvt.d.s f0, f0 +%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" } diff --git a/runtime/interpreter/mterp/mips64/op_float_to_int.S b/runtime/interpreter/mterp/mips64/op_float_to_int.S new file mode 100644 index 0000000000..d957540a7b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_float_to_int.S @@ -0,0 +1,23 @@ +%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" } + /* + * TODO: simplify this when the MIPS64R6 emulator + * supports NAN2008=1. + */ + li t0, INT_MIN_AS_FLOAT + mtc1 t0, f1 + cmp.le.s f1, f1, f0 + bc1nez f1, .L${opcode}_trunc + cmp.eq.s f1, f0, f0 + li t0, INT_MIN + mfc1 t1, f1 + and t0, t0, t1 + b .L${opcode}_done +%break +.L${opcode}_trunc: + trunc.w.s f0, f0 + mfc1 t0, f0 +.L${opcode}_done: + /* Can't include fcvtFooter.S after break */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG t0, a1 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_float_to_long.S b/runtime/interpreter/mterp/mips64/op_float_to_long.S new file mode 100644 index 0000000000..5d036c8455 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_float_to_long.S @@ -0,0 +1,23 @@ +%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" } + /* + * TODO: simplify this when the MIPS64R6 emulator + * supports NAN2008=1. 
+ */ + li t0, LONG_MIN_AS_FLOAT + mtc1 t0, f1 + cmp.le.s f1, f1, f0 + bc1nez f1, .L${opcode}_trunc + cmp.eq.s f1, f0, f0 + dli t0, LONG_MIN + mfc1 t1, f1 + and t0, t0, t1 + b .L${opcode}_done +%break +.L${opcode}_trunc: + trunc.l.s f0, f0 + dmfc1 t0, f0 +.L${opcode}_done: + /* Can't include fcvtFooter.S after break */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE t0, a1 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_goto.S b/runtime/interpreter/mterp/mips64/op_goto.S new file mode 100644 index 0000000000..7c7d0ecf5a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_goto.S @@ -0,0 +1,25 @@ + /* + * Unconditional branch, 8-bit offset. + * + * The branch distance is a signed code-unit offset, which we need to + * double to get a byte offset. + */ + /* goto +AA */ + .extern MterpProfileBranch + srl rINST, rINST, 8 + seb rINST, rINST # rINST <- offset (sign-extended AA) +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_goto_16.S b/runtime/interpreter/mterp/mips64/op_goto_16.S new file mode 100644 index 0000000000..566e3a78f0 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_goto_16.S @@ -0,0 +1,24 @@ + /* + * Unconditional branch, 16-bit offset. + * + * The branch distance is a signed code-unit offset, which we need to + * double to get a byte offset. 
+ */ + /* goto/16 +AAAA */ + .extern MterpProfileBranch + lh rINST, 2(rPC) # rINST <- offset (sign-extended AAAA) +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_goto_32.S b/runtime/interpreter/mterp/mips64/op_goto_32.S new file mode 100644 index 0000000000..b260083ae8 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_goto_32.S @@ -0,0 +1,29 @@ + /* + * Unconditional branch, 32-bit offset. + * + * The branch distance is a signed code-unit offset, which we need to + * double to get a byte offset. + * + * Unlike most opcodes, this one is allowed to branch to itself, so + * our "backward branch" test must be "<=0" instead of "<0". 
+ */ + /* goto/32 +AAAAAAAA */ + .extern MterpProfileBranch + lh rINST, 2(rPC) # rINST <- aaaa (low) + lh a1, 4(rPC) # a1 <- AAAA (high) + ins rINST, a1, 16, 16 # rINST <- offset (sign-extended AAAAaaaa) +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + blez a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_if_eq.S b/runtime/interpreter/mterp/mips64/op_if_eq.S new file mode 100644 index 0000000000..aa35cadf17 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_eq.S @@ -0,0 +1 @@ +%include "mips64/bincmp.S" { "condition":"eq" } diff --git a/runtime/interpreter/mterp/mips64/op_if_eqz.S b/runtime/interpreter/mterp/mips64/op_if_eqz.S new file mode 100644 index 0000000000..0fe34187a0 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_eqz.S @@ -0,0 +1 @@ +%include "mips64/zcmp.S" { "condition":"eq" } diff --git a/runtime/interpreter/mterp/mips64/op_if_ge.S b/runtime/interpreter/mterp/mips64/op_if_ge.S new file mode 100644 index 0000000000..59fdcc5b33 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_ge.S @@ -0,0 +1 @@ +%include "mips64/bincmp.S" { "condition":"ge" } diff --git a/runtime/interpreter/mterp/mips64/op_if_gez.S b/runtime/interpreter/mterp/mips64/op_if_gez.S new file mode 100644 index 0000000000..57f1f66ecd --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_gez.S @@ -0,0 +1 @@ +%include "mips64/zcmp.S" { "condition":"ge" } diff --git a/runtime/interpreter/mterp/mips64/op_if_gt.S 
b/runtime/interpreter/mterp/mips64/op_if_gt.S new file mode 100644 index 0000000000..26cc1195b5 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_gt.S @@ -0,0 +1 @@ +%include "mips64/bincmp.S" { "condition":"gt" } diff --git a/runtime/interpreter/mterp/mips64/op_if_gtz.S b/runtime/interpreter/mterp/mips64/op_if_gtz.S new file mode 100644 index 0000000000..69fcacb82d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_gtz.S @@ -0,0 +1 @@ +%include "mips64/zcmp.S" { "condition":"gt" } diff --git a/runtime/interpreter/mterp/mips64/op_if_le.S b/runtime/interpreter/mterp/mips64/op_if_le.S new file mode 100644 index 0000000000..a7fce17c40 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_le.S @@ -0,0 +1 @@ +%include "mips64/bincmp.S" { "condition":"le" } diff --git a/runtime/interpreter/mterp/mips64/op_if_lez.S b/runtime/interpreter/mterp/mips64/op_if_lez.S new file mode 100644 index 0000000000..f3edcc6d99 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_lez.S @@ -0,0 +1 @@ +%include "mips64/zcmp.S" { "condition":"le" } diff --git a/runtime/interpreter/mterp/mips64/op_if_lt.S b/runtime/interpreter/mterp/mips64/op_if_lt.S new file mode 100644 index 0000000000..a975a31b57 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_lt.S @@ -0,0 +1 @@ +%include "mips64/bincmp.S" { "condition":"lt" } diff --git a/runtime/interpreter/mterp/mips64/op_if_ltz.S b/runtime/interpreter/mterp/mips64/op_if_ltz.S new file mode 100644 index 0000000000..c1d730d43f --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_ltz.S @@ -0,0 +1 @@ +%include "mips64/zcmp.S" { "condition":"lt" } diff --git a/runtime/interpreter/mterp/mips64/op_if_ne.S b/runtime/interpreter/mterp/mips64/op_if_ne.S new file mode 100644 index 0000000000..f143ee917e --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_ne.S @@ -0,0 +1 @@ +%include "mips64/bincmp.S" { "condition":"ne" } diff --git a/runtime/interpreter/mterp/mips64/op_if_nez.S 
b/runtime/interpreter/mterp/mips64/op_if_nez.S new file mode 100644 index 0000000000..1856b96dbc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_if_nez.S @@ -0,0 +1 @@ +%include "mips64/zcmp.S" { "condition":"ne" } diff --git a/runtime/interpreter/mterp/mips64/op_iget.S b/runtime/interpreter/mterp/mips64/op_iget.S new file mode 100644 index 0000000000..ade4b31b80 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget.S @@ -0,0 +1,26 @@ +%default { "is_object":"0", "helper":"artGet32InstanceFromCode"} + /* + * General instance field get. + * + * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short + */ + .extern $helper + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer + move a3, rSELF # a3 <- self + jal $helper + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + .if $is_object + SET_VREG_OBJECT v0, a2 # fp[A] <- v0 + .else + SET_VREG v0, a2 # fp[A] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean.S b/runtime/interpreter/mterp/mips64/op_iget_boolean.S new file mode 100644 index 0000000000..cb2c8bef07 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_boolean.S @@ -0,0 +1 @@ +%include "mips64/op_iget.S" { "helper":"artGetBooleanInstanceFromCode" } diff --git a/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S new file mode 100644 index 0000000000..979dc7079e --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_boolean_quick.S @@ -0,0 +1 @@ +%include "mips64/op_iget_quick.S" { "load":"lbu" } diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte.S b/runtime/interpreter/mterp/mips64/op_iget_byte.S new file 
mode 100644 index 0000000000..099d8d0362 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_byte.S @@ -0,0 +1 @@ +%include "mips64/op_iget.S" { "helper":"artGetByteInstanceFromCode" } diff --git a/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S new file mode 100644 index 0000000000..cb35556721 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_byte_quick.S @@ -0,0 +1 @@ +%include "mips64/op_iget_quick.S" { "load":"lb" } diff --git a/runtime/interpreter/mterp/mips64/op_iget_char.S b/runtime/interpreter/mterp/mips64/op_iget_char.S new file mode 100644 index 0000000000..927b7affa6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_char.S @@ -0,0 +1 @@ +%include "mips64/op_iget.S" { "helper":"artGetCharInstanceFromCode" } diff --git a/runtime/interpreter/mterp/mips64/op_iget_char_quick.S b/runtime/interpreter/mterp/mips64/op_iget_char_quick.S new file mode 100644 index 0000000000..603456775b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_char_quick.S @@ -0,0 +1 @@ +%include "mips64/op_iget_quick.S" { "load":"lhu" } diff --git a/runtime/interpreter/mterp/mips64/op_iget_object.S b/runtime/interpreter/mterp/mips64/op_iget_object.S new file mode 100644 index 0000000000..c658556992 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_object.S @@ -0,0 +1 @@ +%include "mips64/op_iget.S" { "is_object":"1", "helper":"artGetObjInstanceFromCode" } diff --git a/runtime/interpreter/mterp/mips64/op_iget_object_quick.S b/runtime/interpreter/mterp/mips64/op_iget_object_quick.S new file mode 100644 index 0000000000..171d54301b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_object_quick.S @@ -0,0 +1,16 @@ + /* For: iget-object-quick */ + /* op vA, vB, offset//CCCC */ + .extern artIGetObjectFromMterp + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + EXPORT_PC + GET_VREG_U a0, a2 # a0 <- object we're operating on + jal 
artIGetObjectFromMterp # (obj, offset) + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + SET_VREG_OBJECT v0, a2 # fp[A] <- v0 + ADVANCE 2 # advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_iget_quick.S b/runtime/interpreter/mterp/mips64/op_iget_quick.S new file mode 100644 index 0000000000..fee6ab738c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_quick.S @@ -0,0 +1,14 @@ +%default { "load":"lw" } + /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- object we're operating on + ext a4, rINST, 8, 4 # a4 <- A + daddu a1, a1, a3 + beqz a3, common_errNullObject # object was null + $load a0, 0(a1) # a0 <- obj.field + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG a0, a4 # fp[A] <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_iget_short.S b/runtime/interpreter/mterp/mips64/op_iget_short.S new file mode 100644 index 0000000000..28b5093b6d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_short.S @@ -0,0 +1 @@ +%include "mips64/op_iget.S" { "helper":"artGetShortInstanceFromCode" } diff --git a/runtime/interpreter/mterp/mips64/op_iget_short_quick.S b/runtime/interpreter/mterp/mips64/op_iget_short_quick.S new file mode 100644 index 0000000000..6e152dbf48 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_short_quick.S @@ -0,0 +1 @@ +%include "mips64/op_iget_quick.S" { "load":"lh" } diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide.S b/runtime/interpreter/mterp/mips64/op_iget_wide.S new file mode 100644 index 0000000000..85cf6705a7 --- /dev/null +++ 
b/runtime/interpreter/mterp/mips64/op_iget_wide.S @@ -0,0 +1,21 @@ + /* + * 64-bit instance field get. + * + * for: iget-wide + */ + .extern artGet64InstanceFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer + move a3, rSELF # a3 <- self + jal artGet64InstanceFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + SET_VREG_WIDE v0, a2 # fp[A] <- v0 + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S new file mode 100644 index 0000000000..2adc6adf15 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iget_wide_quick.S @@ -0,0 +1,14 @@ + /* iget-wide-quick vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a4, 2(rPC) # a4 <- field byte offset + GET_VREG_U a3, a2 # a3 <- object we're operating on + ext a2, rINST, 8, 4 # a2 <- A + beqz a3, common_errNullObject # object was null + daddu a4, a3, a4 # create direct pointer + lw a0, 0(a4) + lw a1, 4(a4) + dinsu a0, a1, 32, 32 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG_WIDE a0, a2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_instance_of.S b/runtime/interpreter/mterp/mips64/op_instance_of.S new file mode 100644 index 0000000000..39a5dc7c26 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_instance_of.S @@ -0,0 +1,23 @@ + /* + * Check to see if an object reference is an instance of a class. + * + * Most common situation is a non-null object, being compared against + * an already-resolved class. 
+ */ + /* instance-of vA, vB, class//CCCC */ + .extern MterpInstanceOf + EXPORT_PC + lhu a0, 2(rPC) # a0 <- CCCC + srl a1, rINST, 12 # a1 <- B + dlsa a1, a1, rFP, 2 # a1 <- &object + ld a2, OFF_FP_METHOD(rFP) # a2 <- method + move a3, rSELF # a3 <- self + jal MterpInstanceOf # (index, &obj, method, self) + ld a1, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a1, MterpException + ADVANCE 2 # advance rPC + SET_VREG v0, a2 # vA <- v0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_int_to_byte.S b/runtime/interpreter/mterp/mips64/op_int_to_byte.S new file mode 100644 index 0000000000..1993e076a6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_int_to_byte.S @@ -0,0 +1 @@ +%include "mips64/unop.S" {"instr":"seb a0, a0"} diff --git a/runtime/interpreter/mterp/mips64/op_int_to_char.S b/runtime/interpreter/mterp/mips64/op_int_to_char.S new file mode 100644 index 0000000000..8f03acd3f6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_int_to_char.S @@ -0,0 +1 @@ +%include "mips64/unop.S" {"instr":"and a0, a0, 0xffff"} diff --git a/runtime/interpreter/mterp/mips64/op_int_to_double.S b/runtime/interpreter/mterp/mips64/op_int_to_double.S new file mode 100644 index 0000000000..6df71be394 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_int_to_double.S @@ -0,0 +1,8 @@ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. 
+ */ +%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" } + cvt.d.w f0, f0 +%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" } diff --git a/runtime/interpreter/mterp/mips64/op_int_to_float.S b/runtime/interpreter/mterp/mips64/op_int_to_float.S new file mode 100644 index 0000000000..77e9eba53a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_int_to_float.S @@ -0,0 +1,8 @@ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. + */ +%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" } + cvt.s.w f0, f0 +%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" } diff --git a/runtime/interpreter/mterp/mips64/op_int_to_long.S b/runtime/interpreter/mterp/mips64/op_int_to_long.S new file mode 100644 index 0000000000..7b9ad86fdc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_int_to_long.S @@ -0,0 +1,8 @@ + /* int-to-long vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB (sign-extended to 64 bits) + ext a2, rINST, 8, 4 # a2 <- A + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- vB + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_int_to_short.S b/runtime/interpreter/mterp/mips64/op_int_to_short.S new file mode 100644 index 0000000000..4a3f2346cf --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_int_to_short.S @@ -0,0 +1 @@ +%include "mips64/unop.S" {"instr":"seh a0, a0"} diff --git a/runtime/interpreter/mterp/mips64/op_invoke_direct.S b/runtime/interpreter/mterp/mips64/op_invoke_direct.S new file mode 100644 index 0000000000..5047118e48 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_direct.S @@ -0,0 +1 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeDirect" } diff --git 
a/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S b/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S new file mode 100644 index 0000000000..5c9b95f5be --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_direct_range.S @@ -0,0 +1 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeDirectRange" } diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface.S b/runtime/interpreter/mterp/mips64/op_invoke_interface.S new file mode 100644 index 0000000000..ed148adcbb --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_interface.S @@ -0,0 +1,8 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeInterface" } + /* + * Handle an interface method call. + * + * for: invoke-interface, invoke-interface/range + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ diff --git a/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S b/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S new file mode 100644 index 0000000000..91c231e0f4 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_interface_range.S @@ -0,0 +1 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeInterfaceRange" } diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static.S b/runtime/interpreter/mterp/mips64/op_invoke_static.S new file mode 100644 index 0000000000..44f5cb7a78 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_static.S @@ -0,0 +1 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeStatic" } diff --git a/runtime/interpreter/mterp/mips64/op_invoke_static_range.S b/runtime/interpreter/mterp/mips64/op_invoke_static_range.S new file mode 100644 index 0000000000..289e5aa977 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_static_range.S @@ -0,0 +1 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeStaticRange" } diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super.S b/runtime/interpreter/mterp/mips64/op_invoke_super.S new file mode 100644 
index 0000000000..b13fffe714 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_super.S @@ -0,0 +1,8 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeSuper" } + /* + * Handle a "super" method call. + * + * for: invoke-super, invoke-super/range + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ diff --git a/runtime/interpreter/mterp/mips64/op_invoke_super_range.S b/runtime/interpreter/mterp/mips64/op_invoke_super_range.S new file mode 100644 index 0000000000..350b9757ba --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_super_range.S @@ -0,0 +1 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeSuperRange" } diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual.S new file mode 100644 index 0000000000..0d26cda812 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual.S @@ -0,0 +1,8 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeVirtual" } + /* + * Handle a virtual method call. 
+ * + * for: invoke-virtual, invoke-virtual/range + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S new file mode 100644 index 0000000000..f39562c199 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual_quick.S @@ -0,0 +1 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuick" } diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S new file mode 100644 index 0000000000..0bb43f8fcc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range.S @@ -0,0 +1 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualRange" } diff --git a/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S new file mode 100644 index 0000000000..c4488513bd --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_invoke_virtual_range_quick.S @@ -0,0 +1 @@ +%include "mips64/invoke.S" { "helper":"MterpInvokeVirtualQuickRange" } diff --git a/runtime/interpreter/mterp/mips64/op_iput.S b/runtime/interpreter/mterp/mips64/op_iput.S new file mode 100644 index 0000000000..a906a0fc82 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput.S @@ -0,0 +1,21 @@ +%default { "helper":"artSet32InstanceFromMterp" } + /* + * General 32-bit instance field put. 
+ * + * for: iput, iput-boolean, iput-byte, iput-char, iput-short + */ + /* op vA, vB, field//CCCC */ + .extern $helper + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + GET_VREG a2, a2 # a2 <- fp[A] + ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer + PREFETCH_INST 2 + jal $helper + bnez v0, MterpPossibleException # bail out + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean.S b/runtime/interpreter/mterp/mips64/op_iput_boolean.S new file mode 100644 index 0000000000..3034fa59d5 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_boolean.S @@ -0,0 +1 @@ +%include "mips64/op_iput.S" { "helper":"artSet8InstanceFromMterp" } diff --git a/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S b/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S new file mode 100644 index 0000000000..df99948e40 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_boolean_quick.S @@ -0,0 +1 @@ +%include "mips64/op_iput_quick.S" { "store":"sb" } diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte.S b/runtime/interpreter/mterp/mips64/op_iput_byte.S new file mode 100644 index 0000000000..3034fa59d5 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_byte.S @@ -0,0 +1 @@ +%include "mips64/op_iput.S" { "helper":"artSet8InstanceFromMterp" } diff --git a/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S b/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S new file mode 100644 index 0000000000..df99948e40 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_byte_quick.S @@ -0,0 +1 @@ +%include "mips64/op_iput_quick.S" { "store":"sb" } diff --git a/runtime/interpreter/mterp/mips64/op_iput_char.S b/runtime/interpreter/mterp/mips64/op_iput_char.S new file mode 100644 index 0000000000..4c2fa28776 --- /dev/null +++ 
b/runtime/interpreter/mterp/mips64/op_iput_char.S @@ -0,0 +1 @@ +%include "mips64/op_iput.S" { "helper":"artSet16InstanceFromMterp" } diff --git a/runtime/interpreter/mterp/mips64/op_iput_char_quick.S b/runtime/interpreter/mterp/mips64/op_iput_char_quick.S new file mode 100644 index 0000000000..a6286b7b97 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_char_quick.S @@ -0,0 +1 @@ +%include "mips64/op_iput_quick.S" { "store":"sh" } diff --git a/runtime/interpreter/mterp/mips64/op_iput_object.S b/runtime/interpreter/mterp/mips64/op_iput_object.S new file mode 100644 index 0000000000..9a42f54669 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_object.S @@ -0,0 +1,11 @@ + .extern MterpIputObject + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + move a3, rSELF + jal MterpIputObject + beqzc v0, MterpException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_iput_object_quick.S b/runtime/interpreter/mterp/mips64/op_iput_object_quick.S new file mode 100644 index 0000000000..658ef42a19 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_object_quick.S @@ -0,0 +1,10 @@ + .extern MterpIputObjectQuick + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + jal MterpIputObjectQuick + beqzc v0, MterpException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_iput_quick.S b/runtime/interpreter/mterp/mips64/op_iput_quick.S new file mode 100644 index 0000000000..b95adfcd4f --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_quick.S @@ -0,0 +1,14 @@ +%default { "store":"sw" } + /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */ + /* op vA, vB, offset//CCCC */ + srl 
a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + beqz a3, common_errNullObject # object was null + GET_VREG a0, a2 # a0 <- fp[A] + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + daddu a1, a1, a3 + $store a0, 0(a1) # obj.field <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_iput_short.S b/runtime/interpreter/mterp/mips64/op_iput_short.S new file mode 100644 index 0000000000..4c2fa28776 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_short.S @@ -0,0 +1 @@ +%include "mips64/op_iput.S" { "helper":"artSet16InstanceFromMterp" } diff --git a/runtime/interpreter/mterp/mips64/op_iput_short_quick.S b/runtime/interpreter/mterp/mips64/op_iput_short_quick.S new file mode 100644 index 0000000000..a6286b7b97 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_short_quick.S @@ -0,0 +1 @@ +%include "mips64/op_iput_quick.S" { "store":"sh" } diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide.S b/runtime/interpreter/mterp/mips64/op_iput_wide.S new file mode 100644 index 0000000000..9b790f812a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_wide.S @@ -0,0 +1,15 @@ + /* iput-wide vA, vB, field//CCCC */ + .extern artSet64InstanceFromMterp + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + dlsa a2, a2, rFP, 2 # a2 <- &fp[A] + ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer + PREFETCH_INST 2 + jal artSet64InstanceFromMterp + bnez v0, MterpPossibleException # bail out + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S b/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S new file mode 100644 index 
0000000000..95a8ad8f9c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_iput_wide_quick.S @@ -0,0 +1,14 @@ + /* iput-wide-quick vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a3, 2(rPC) # a3 <- field byte offset + GET_VREG_U a2, a2 # a2 <- fp[B], the object pointer + ext a0, rINST, 8, 4 # a0 <- A + beqz a2, common_errNullObject # object was null + GET_VREG_WIDE a0, a0 # a0 <- fp[A] + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + daddu a1, a2, a3 # create a direct pointer + sw a0, 0(a1) + dsrl32 a0, a0, 0 + sw a0, 4(a1) + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_long_to_double.S b/runtime/interpreter/mterp/mips64/op_long_to_double.S new file mode 100644 index 0000000000..8503e769b9 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_long_to_double.S @@ -0,0 +1,8 @@ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. + */ +%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" } + cvt.d.l f0, f0 +%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" } diff --git a/runtime/interpreter/mterp/mips64/op_long_to_float.S b/runtime/interpreter/mterp/mips64/op_long_to_float.S new file mode 100644 index 0000000000..31f5c0e9b0 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_long_to_float.S @@ -0,0 +1,8 @@ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. 
+ */ +%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" } + cvt.s.l f0, f0 +%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" } diff --git a/runtime/interpreter/mterp/mips64/op_long_to_int.S b/runtime/interpreter/mterp/mips64/op_long_to_int.S new file mode 100644 index 0000000000..4ef4b512dc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_long_to_int.S @@ -0,0 +1,2 @@ +/* we ignore the high word, making this equivalent to a 32-bit reg move */ +%include "mips64/op_move.S" diff --git a/runtime/interpreter/mterp/mips64/op_monitor_enter.S b/runtime/interpreter/mterp/mips64/op_monitor_enter.S new file mode 100644 index 0000000000..36ae50346e --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_monitor_enter.S @@ -0,0 +1,14 @@ + /* + * Synchronize on an object. + */ + /* monitor-enter vAA */ + .extern artLockObjectFromCode + EXPORT_PC + srl a2, rINST, 8 # a2 <- AA + GET_VREG_U a0, a2 # a0 <- vAA (object) + move a1, rSELF # a1 <- self + jal artLockObjectFromCode + bnezc v0, MterpException + FETCH_ADVANCE_INST 1 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_monitor_exit.S b/runtime/interpreter/mterp/mips64/op_monitor_exit.S new file mode 100644 index 0000000000..9945952017 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_monitor_exit.S @@ -0,0 +1,18 @@ + /* + * Unlock an object. + * + * Exceptions that occur when unlocking a monitor need to appear as + * if they happened at the following instruction. See the Dalvik + * instruction spec. 
+ */ + /* monitor-exit vAA */ + .extern artUnlockObjectFromCode + EXPORT_PC + srl a2, rINST, 8 # a2 <- AA + GET_VREG_U a0, a2 # a0 <- vAA (object) + move a1, rSELF # a1 <- self + jal artUnlockObjectFromCode # v0 <- success for unlock(self, obj) + bnezc v0, MterpException + FETCH_ADVANCE_INST 1 # before throw: advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_move.S b/runtime/interpreter/mterp/mips64/op_move.S new file mode 100644 index 0000000000..c79f6cde8d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move.S @@ -0,0 +1,14 @@ +%default { "is_object":"0" } + /* for move, move-object, long-to-int */ + /* op vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vB + GET_INST_OPCODE v0 # extract opcode from rINST + .if $is_object + SET_VREG_OBJECT a0, a2 # vA <- vB + .else + SET_VREG a0, a2 # vA <- vB + .endif + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_move_16.S b/runtime/interpreter/mterp/mips64/op_move_16.S new file mode 100644 index 0000000000..9d5c4dce8c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_16.S @@ -0,0 +1,14 @@ +%default { "is_object":"0" } + /* for: move/16, move-object/16 */ + /* op vAAAA, vBBBB */ + lhu a3, 4(rPC) # a3 <- BBBB + lhu a2, 2(rPC) # a2 <- AAAA + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vBBBB + GET_INST_OPCODE v0 # extract opcode from rINST + .if $is_object + SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB + .else + SET_VREG a0, a2 # vAAAA <- vBBBB + .endif + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_move_exception.S b/runtime/interpreter/mterp/mips64/op_move_exception.S new file mode 100644 index 0000000000..d226718c8f --- /dev/null +++ 
b/runtime/interpreter/mterp/mips64/op_move_exception.S @@ -0,0 +1,8 @@ + /* move-exception vAA */ + srl a2, rINST, 8 # a2 <- AA + ld a0, THREAD_EXCEPTION_OFFSET(rSELF) # load exception obj + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + SET_VREG_OBJECT a0, a2 # vAA <- exception obj + GET_INST_OPCODE v0 # extract opcode from rINST + sd zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_move_from16.S b/runtime/interpreter/mterp/mips64/op_move_from16.S new file mode 100644 index 0000000000..6d6bde007f --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_from16.S @@ -0,0 +1,14 @@ +%default { "is_object":"0" } + /* for: move/from16, move-object/from16 */ + /* op vAA, vBBBB */ + lhu a3, 2(rPC) # a3 <- BBBB + srl a2, rINST, 8 # a2 <- AA + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vBBBB + GET_INST_OPCODE v0 # extract opcode from rINST + .if $is_object + SET_VREG_OBJECT a0, a2 # vAA <- vBBBB + .else + SET_VREG a0, a2 # vAA <- vBBBB + .endif + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_move_object.S b/runtime/interpreter/mterp/mips64/op_move_object.S new file mode 100644 index 0000000000..47e0272a6c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_object.S @@ -0,0 +1 @@ +%include "mips64/op_move.S" {"is_object":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_move_object_16.S b/runtime/interpreter/mterp/mips64/op_move_object_16.S new file mode 100644 index 0000000000..a777dcdaf8 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_object_16.S @@ -0,0 +1 @@ +%include "mips64/op_move_16.S" {"is_object":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_move_object_from16.S b/runtime/interpreter/mterp/mips64/op_move_object_from16.S new file mode 100644 index 0000000000..ab55ebd646 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_object_from16.S @@ -0,0 +1 
@@ +%include "mips64/op_move_from16.S" {"is_object":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_move_result.S b/runtime/interpreter/mterp/mips64/op_move_result.S new file mode 100644 index 0000000000..1ec28cb6d8 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_result.S @@ -0,0 +1,14 @@ +%default { "is_object":"0" } + /* for: move-result, move-result-object */ + /* op vAA */ + srl a2, rINST, 8 # a2 <- AA + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType + lw a0, 0(a0) # a0 <- result.i + GET_INST_OPCODE v0 # extract opcode from rINST + .if $is_object + SET_VREG_OBJECT a0, a2 # vAA <- result + .else + SET_VREG a0, a2 # vAA <- result + .endif + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_move_result_object.S b/runtime/interpreter/mterp/mips64/op_move_result_object.S new file mode 100644 index 0000000000..e76bc22c11 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_result_object.S @@ -0,0 +1 @@ +%include "mips64/op_move_result.S" {"is_object":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_move_result_wide.S b/runtime/interpreter/mterp/mips64/op_move_result_wide.S new file mode 100644 index 0000000000..3ba0d7288b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_result_wide.S @@ -0,0 +1,9 @@ + /* for: move-result-wide */ + /* op vAA */ + srl a2, rINST, 8 # a2 <- AA + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType + ld a0, 0(a0) # a0 <- result.j + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- result + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_move_wide.S b/runtime/interpreter/mterp/mips64/op_move_wide.S new file mode 100644 index 0000000000..ea23f87ff0 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_wide.S @@ -0,0 +1,9 @@ + /* move-wide vA, vB */ + /* 
NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ + ext a3, rINST, 12, 4 # a3 <- B + ext a2, rINST, 8, 4 # a2 <- A + GET_VREG_WIDE a0, a3 # a0 <- vB + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- vB + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_16.S b/runtime/interpreter/mterp/mips64/op_move_wide_16.S new file mode 100644 index 0000000000..8ec606834b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_wide_16.S @@ -0,0 +1,9 @@ + /* move-wide/16 vAAAA, vBBBB */ + /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ + lhu a3, 4(rPC) # a3 <- BBBB + lhu a2, 2(rPC) # a2 <- AAAA + GET_VREG_WIDE a0, a3 # a0 <- vBBBB + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAAAA <- vBBBB + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_move_wide_from16.S b/runtime/interpreter/mterp/mips64/op_move_wide_from16.S new file mode 100644 index 0000000000..11d5603fe1 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_move_wide_from16.S @@ -0,0 +1,9 @@ + /* move-wide/from16 vAA, vBBBB */ + /* NOTE: regs can overlap, e.g. 
"move v6,v7" or "move v7,v6" */ + lhu a3, 2(rPC) # a3 <- BBBB + srl a2, rINST, 8 # a2 <- AA + GET_VREG_WIDE a0, a3 # a0 <- vBBBB + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- vBBBB + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_mul_double.S b/runtime/interpreter/mterp/mips64/op_mul_double.S new file mode 100644 index 0000000000..e7e17f7ece --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_double.S @@ -0,0 +1 @@ +%include "mips64/fbinopWide.S" {"instr":"mul.d f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S new file mode 100644 index 0000000000..f404d4688d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_double_2addr.S @@ -0,0 +1 @@ +%include "mips64/fbinopWide2addr.S" {"instr":"mul.d f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_mul_float.S b/runtime/interpreter/mterp/mips64/op_mul_float.S new file mode 100644 index 0000000000..9a695fca16 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_float.S @@ -0,0 +1 @@ +%include "mips64/fbinop.S" {"instr":"mul.s f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S new file mode 100644 index 0000000000..a134a34253 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_float_2addr.S @@ -0,0 +1 @@ +%include "mips64/fbinop2addr.S" {"instr":"mul.s f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_mul_int.S b/runtime/interpreter/mterp/mips64/op_mul_int.S new file mode 100644 index 0000000000..e1b90ff4e9 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"mul a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S new file mode 100644 
index 0000000000..c0c4063d54 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"mul a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S new file mode 100644 index 0000000000..bb4fff8747 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_int_lit16.S @@ -0,0 +1 @@ +%include "mips64/binopLit16.S" {"instr":"mul a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S b/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S new file mode 100644 index 0000000000..da11ea9295 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"mul a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_mul_long.S b/runtime/interpreter/mterp/mips64/op_mul_long.S new file mode 100644 index 0000000000..ec32850606 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"dmul a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S b/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S new file mode 100644 index 0000000000..eb50cda03c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_mul_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"dmul a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_neg_double.S b/runtime/interpreter/mterp/mips64/op_neg_double.S new file mode 100644 index 0000000000..a135d61173 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_neg_double.S @@ -0,0 +1,3 @@ +%include "mips64/fcvtHeader.S" { "suffix":"_DOUBLE", "valreg":"f0" } + neg.d f0, f0 +%include "mips64/fcvtFooter.S" { "suffix":"_DOUBLE", "valreg":"f0" } diff --git a/runtime/interpreter/mterp/mips64/op_neg_float.S b/runtime/interpreter/mterp/mips64/op_neg_float.S new file mode 100644 index 0000000000..78019f03d8 --- /dev/null 
+++ b/runtime/interpreter/mterp/mips64/op_neg_float.S @@ -0,0 +1,3 @@ +%include "mips64/fcvtHeader.S" { "suffix":"_FLOAT", "valreg":"f0" } + neg.s f0, f0 +%include "mips64/fcvtFooter.S" { "suffix":"_FLOAT", "valreg":"f0" } diff --git a/runtime/interpreter/mterp/mips64/op_neg_int.S b/runtime/interpreter/mterp/mips64/op_neg_int.S new file mode 100644 index 0000000000..31538c0caa --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_neg_int.S @@ -0,0 +1 @@ +%include "mips64/unop.S" {"instr":"subu a0, zero, a0"} diff --git a/runtime/interpreter/mterp/mips64/op_neg_long.S b/runtime/interpreter/mterp/mips64/op_neg_long.S new file mode 100644 index 0000000000..bc80d0623f --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_neg_long.S @@ -0,0 +1 @@ +%include "mips64/unopWide.S" {"instr":"dsubu a0, zero, a0"} diff --git a/runtime/interpreter/mterp/mips64/op_new_array.S b/runtime/interpreter/mterp/mips64/op_new_array.S new file mode 100644 index 0000000000..d78b4ac32e --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_new_array.S @@ -0,0 +1,19 @@ + /* + * Allocate an array of objects, specified with the array class + * and a count. + * + * The verifier guarantees that this is an array class, so we don't + * check for it here. + */ + /* new-array vA, vB, class//CCCC */ + .extern MterpNewArray + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + move a3, rSELF + jal MterpNewArray + beqzc v0, MterpPossibleException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_new_instance.S b/runtime/interpreter/mterp/mips64/op_new_instance.S new file mode 100644 index 0000000000..cc5e13e00d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_new_instance.S @@ -0,0 +1,14 @@ + /* + * Create a new instance of a class. 
+ */ + /* new-instance vAA, class//BBBB */ + .extern MterpNewInstance + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rSELF + move a2, rINST + jal MterpNewInstance # (shadow_frame, self, inst_data) + beqzc v0, MterpPossibleException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_nop.S b/runtime/interpreter/mterp/mips64/op_nop.S new file mode 100644 index 0000000000..cc803a791a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_nop.S @@ -0,0 +1,3 @@ + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_not_int.S b/runtime/interpreter/mterp/mips64/op_not_int.S new file mode 100644 index 0000000000..59540950cd --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_not_int.S @@ -0,0 +1 @@ +%include "mips64/unop.S" {"instr":"nor a0, zero, a0"} diff --git a/runtime/interpreter/mterp/mips64/op_not_long.S b/runtime/interpreter/mterp/mips64/op_not_long.S new file mode 100644 index 0000000000..c8f5da7e82 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_not_long.S @@ -0,0 +1 @@ +%include "mips64/unopWide.S" {"instr":"nor a0, zero, a0"} diff --git a/runtime/interpreter/mterp/mips64/op_or_int.S b/runtime/interpreter/mterp/mips64/op_or_int.S new file mode 100644 index 0000000000..0102355c55 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_or_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"or a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_or_int_2addr.S b/runtime/interpreter/mterp/mips64/op_or_int_2addr.S new file mode 100644 index 0000000000..eed89008c0 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_or_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"or a0, a0, a1"} diff --git 
a/runtime/interpreter/mterp/mips64/op_or_int_lit16.S b/runtime/interpreter/mterp/mips64/op_or_int_lit16.S new file mode 100644 index 0000000000..16a0f3e1a2 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_or_int_lit16.S @@ -0,0 +1 @@ +%include "mips64/binopLit16.S" {"instr":"or a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_or_int_lit8.S b/runtime/interpreter/mterp/mips64/op_or_int_lit8.S new file mode 100644 index 0000000000..dbbf7904c6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_or_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"or a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_or_long.S b/runtime/interpreter/mterp/mips64/op_or_long.S new file mode 100644 index 0000000000..e6f8639e52 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_or_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"or a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_or_long_2addr.S b/runtime/interpreter/mterp/mips64/op_or_long_2addr.S new file mode 100644 index 0000000000..ad5e6c8e99 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_or_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"or a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_packed_switch.S b/runtime/interpreter/mterp/mips64/op_packed_switch.S new file mode 100644 index 0000000000..2c6eb2f3ca --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_packed_switch.S @@ -0,0 +1,36 @@ +%default { "func":"MterpDoPackedSwitch" } + /* + * Handle a packed-switch or sparse-switch instruction. In both cases + * we decode it and hand it off to a helper function. + * + * We don't really expect backward branches in a switch statement, but + * they're perfectly legal, so we check for them here. 
+ * + * for: packed-switch, sparse-switch + */ + /* op vAA, +BBBBBBBB */ + .extern $func + .extern MterpProfileBranch + lh a0, 2(rPC) # a0 <- bbbb (lo) + lh a1, 4(rPC) # a1 <- BBBB (hi) + srl a3, rINST, 8 # a3 <- AA + ins a0, a1, 16, 16 # a0 <- BBBBbbbb + GET_VREG a1, a3 # a1 <- vAA + dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2 + jal $func # v0 <- code-unit branch offset + move rINST, v0 +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + blez a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_rem_double.S b/runtime/interpreter/mterp/mips64/op_rem_double.S new file mode 100644 index 0000000000..ba61cfdc71 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_double.S @@ -0,0 +1,12 @@ + /* rem-double vAA, vBB, vCC */ + .extern fmod + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f12, a2 # f12 <- vBB + GET_VREG_DOUBLE f13, a3 # f13 <- vCC + jal fmod # f0 <- f12 op f13 + srl a4, rINST, 8 # a4 <- AA + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S new file mode 100644 index 0000000000..c649f0d62c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_double_2addr.S @@ -0,0 +1,12 @@ + /* rem-double/2addr vA, vB */ + .extern fmod + ext a2, rINST, 
8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_DOUBLE f12, a2 # f12 <- vA + GET_VREG_DOUBLE f13, a3 # f13 <- vB + jal fmod # f0 <- f12 op f13 + ext a2, rINST, 8, 4 # a2 <- A + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_rem_float.S b/runtime/interpreter/mterp/mips64/op_rem_float.S new file mode 100644 index 0000000000..3967b0b02c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_float.S @@ -0,0 +1,12 @@ + /* rem-float vAA, vBB, vCC */ + .extern fmodf + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f12, a2 # f12 <- vBB + GET_VREG_FLOAT f13, a3 # f13 <- vCC + jal fmodf # f0 <- f12 op f13 + srl a4, rINST, 8 # a4 <- AA + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S new file mode 100644 index 0000000000..3fed41e851 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_float_2addr.S @@ -0,0 +1,12 @@ + /* rem-float/2addr vA, vB */ + .extern fmodf + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_FLOAT f12, a2 # f12 <- vA + GET_VREG_FLOAT f13, a3 # f13 <- vB + jal fmodf # f0 <- f12 op f13 + ext a2, rINST, 8, 4 # a2 <- A + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_rem_int.S b/runtime/interpreter/mterp/mips64/op_rem_int.S new file mode 100644 index 0000000000..c05e9c49fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"mod 
a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S new file mode 100644 index 0000000000..a4e162d3fa --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"mod a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S new file mode 100644 index 0000000000..3284f1473c --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_int_lit16.S @@ -0,0 +1 @@ +%include "mips64/binopLit16.S" {"instr":"mod a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S new file mode 100644 index 0000000000..1e6a584be5 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"mod a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_rem_long.S b/runtime/interpreter/mterp/mips64/op_rem_long.S new file mode 100644 index 0000000000..32b2d1916d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S b/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S new file mode 100644 index 0000000000..ad658e1fde --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rem_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"dmod a0, a0, a1", "chkzero":"1"} diff --git a/runtime/interpreter/mterp/mips64/op_return.S b/runtime/interpreter/mterp/mips64/op_return.S new file mode 100644 index 0000000000..ec986b8066 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_return.S @@ -0,0 +1,18 @@ + /* + * Return a 32-bit value. 
+ * + * for: return, return-object + */ + /* op vAA */ + .extern MterpThreadFenceForConstructor + .extern MterpSuspendCheck + jal MterpThreadFenceForConstructor + lw ra, THREAD_FLAGS_OFFSET(rSELF) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, 1f + jal MterpSuspendCheck # (self) +1: + srl a2, rINST, 8 # a2 <- AA + GET_VREG_U a0, a2 # a0 <- vAA + b MterpReturn diff --git a/runtime/interpreter/mterp/mips64/op_return_object.S b/runtime/interpreter/mterp/mips64/op_return_object.S new file mode 100644 index 0000000000..67f1871e3d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_return_object.S @@ -0,0 +1 @@ +%include "mips64/op_return.S" diff --git a/runtime/interpreter/mterp/mips64/op_return_void.S b/runtime/interpreter/mterp/mips64/op_return_void.S new file mode 100644 index 0000000000..05253aea05 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_return_void.S @@ -0,0 +1,11 @@ + .extern MterpThreadFenceForConstructor + .extern MterpSuspendCheck + jal MterpThreadFenceForConstructor + lw ra, THREAD_FLAGS_OFFSET(rSELF) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, 1f + jal MterpSuspendCheck # (self) +1: + li a0, 0 + b MterpReturn diff --git a/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S new file mode 100644 index 0000000000..f67e811c70 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_return_void_no_barrier.S @@ -0,0 +1,9 @@ + .extern MterpSuspendCheck + lw ra, THREAD_FLAGS_OFFSET(rSELF) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, 1f + jal MterpSuspendCheck # (self) +1: + li a0, 0 + b MterpReturn diff --git a/runtime/interpreter/mterp/mips64/op_return_wide.S b/runtime/interpreter/mterp/mips64/op_return_wide.S new file mode 100644 index 0000000000..544e02794d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_return_wide.S 
@@ -0,0 +1,17 @@ + /* + * Return a 64-bit value. + */ + /* return-wide vAA */ + /* op vAA */ + .extern MterpThreadFenceForConstructor + .extern MterpSuspendCheck + jal MterpThreadFenceForConstructor + lw ra, THREAD_FLAGS_OFFSET(rSELF) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, 1f + jal MterpSuspendCheck # (self) +1: + srl a2, rINST, 8 # a2 <- AA + GET_VREG_WIDE a0, a2 # a0 <- vAA + b MterpReturn diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int.S b/runtime/interpreter/mterp/mips64/op_rsub_int.S new file mode 100644 index 0000000000..fa31a0af5f --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rsub_int.S @@ -0,0 +1 @@ +%include "mips64/binopLit16.S" {"instr":"subu a0, a1, a0"} diff --git a/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S b/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S new file mode 100644 index 0000000000..c31ff32060 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_rsub_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"subu a0, a1, a0"} diff --git a/runtime/interpreter/mterp/mips64/op_sget.S b/runtime/interpreter/mterp/mips64/op_sget.S new file mode 100644 index 0000000000..bd2cfe3778 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sget.S @@ -0,0 +1,26 @@ +%default { "is_object":"0", "helper":"artGet32StaticFromCode", "extend":"" } + /* + * General SGET handler wrapper. 
+ * + * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short + */ + /* op vAA, field//BBBB */ + .extern $helper + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + move a2, rSELF + jal $helper + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + srl a2, rINST, 8 # a2 <- AA + $extend + PREFETCH_INST 2 + bnez a3, MterpException # bail out + .if $is_object + SET_VREG_OBJECT v0, a2 # fp[AA] <- v0 + .else + SET_VREG v0, a2 # fp[AA] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 diff --git a/runtime/interpreter/mterp/mips64/op_sget_boolean.S b/runtime/interpreter/mterp/mips64/op_sget_boolean.S new file mode 100644 index 0000000000..e7b1844d86 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sget_boolean.S @@ -0,0 +1 @@ +%include "mips64/op_sget.S" {"helper":"artGetBooleanStaticFromCode", "extend":"and v0, v0, 0xff"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_byte.S b/runtime/interpreter/mterp/mips64/op_sget_byte.S new file mode 100644 index 0000000000..52a2e4a5d5 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sget_byte.S @@ -0,0 +1 @@ +%include "mips64/op_sget.S" {"helper":"artGetByteStaticFromCode", "extend":"seb v0, v0"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_char.S b/runtime/interpreter/mterp/mips64/op_sget_char.S new file mode 100644 index 0000000000..873d82a0d6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sget_char.S @@ -0,0 +1 @@ +%include "mips64/op_sget.S" {"helper":"artGetCharStaticFromCode", "extend":"and v0, v0, 0xffff"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_object.S b/runtime/interpreter/mterp/mips64/op_sget_object.S new file mode 100644 index 0000000000..3108417e00 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sget_object.S @@ -0,0 +1 @@ +%include "mips64/op_sget.S" {"is_object":"1", "helper":"artGetObjStaticFromCode"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_short.S 
b/runtime/interpreter/mterp/mips64/op_sget_short.S new file mode 100644 index 0000000000..fed4e76baa --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sget_short.S @@ -0,0 +1 @@ +%include "mips64/op_sget.S" {"helper":"artGetShortStaticFromCode", "extend":"seh v0, v0"} diff --git a/runtime/interpreter/mterp/mips64/op_sget_wide.S b/runtime/interpreter/mterp/mips64/op_sget_wide.S new file mode 100644 index 0000000000..77124d1d8d --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sget_wide.S @@ -0,0 +1,18 @@ + /* + * SGET_WIDE handler wrapper. + * + */ + /* sget-wide vAA, field//BBBB */ + .extern artGet64StaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + move a2, rSELF + jal artGet64StaticFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + srl a4, rINST, 8 # a4 <- AA + bnez a3, MterpException # bail out + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG_WIDE v0, a4 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_shl_int.S b/runtime/interpreter/mterp/mips64/op_shl_int.S new file mode 100644 index 0000000000..784481f335 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_shl_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"sll a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S new file mode 100644 index 0000000000..a6c8a78ff6 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_shl_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"sll a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S new file mode 100644 index 0000000000..36ef207edf --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_shl_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"sll a0, a0, a1"} diff --git 
a/runtime/interpreter/mterp/mips64/op_shl_long.S b/runtime/interpreter/mterp/mips64/op_shl_long.S new file mode 100644 index 0000000000..225a2cbc2a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_shl_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"dsll a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S new file mode 100644 index 0000000000..c04d8823f4 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_shl_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"dsll a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_shr_int.S b/runtime/interpreter/mterp/mips64/op_shr_int.S new file mode 100644 index 0000000000..eded0373b1 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_shr_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"sra a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S new file mode 100644 index 0000000000..5b4d96f187 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_shr_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"sra a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S new file mode 100644 index 0000000000..175eb8633a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_shr_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"sra a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_shr_long.S b/runtime/interpreter/mterp/mips64/op_shr_long.S new file mode 100644 index 0000000000..0db38c8510 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_shr_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"dsra a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S new file mode 100644 index 0000000000..48131ad7e4 --- 
/dev/null +++ b/runtime/interpreter/mterp/mips64/op_shr_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"dsra a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_sparse_switch.S b/runtime/interpreter/mterp/mips64/op_sparse_switch.S new file mode 100644 index 0000000000..b065aaa95b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sparse_switch.S @@ -0,0 +1 @@ +%include "mips64/op_packed_switch.S" { "func":"MterpDoSparseSwitch" } diff --git a/runtime/interpreter/mterp/mips64/op_sput.S b/runtime/interpreter/mterp/mips64/op_sput.S new file mode 100644 index 0000000000..142f18f3ba --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sput.S @@ -0,0 +1,20 @@ +%default { "helper":"artSet32StaticFromCode" } + /* + * General SPUT handler wrapper. + * + * for: sput, sput-boolean, sput-byte, sput-char, sput-short + */ + /* op vAA, field//BBBB */ + .extern $helper + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + srl a3, rINST, 8 # a3 <- AA + GET_VREG a1, a3 # a1 <- fp[AA] + ld a2, OFF_FP_METHOD(rFP) + move a3, rSELF + PREFETCH_INST 2 # Get next inst, but don't advance rPC + jal $helper + bnezc v0, MterpException # 0 on success + ADVANCE 2 # Past exception point - now advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_sput_boolean.S b/runtime/interpreter/mterp/mips64/op_sput_boolean.S new file mode 100644 index 0000000000..f5b8dbf433 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sput_boolean.S @@ -0,0 +1 @@ +%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"} diff --git a/runtime/interpreter/mterp/mips64/op_sput_byte.S b/runtime/interpreter/mterp/mips64/op_sput_byte.S new file mode 100644 index 0000000000..f5b8dbf433 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sput_byte.S @@ -0,0 +1 @@ +%include "mips64/op_sput.S" {"helper":"artSet8StaticFromCode"} diff --git 
a/runtime/interpreter/mterp/mips64/op_sput_char.S b/runtime/interpreter/mterp/mips64/op_sput_char.S new file mode 100644 index 0000000000..c4d195c82f --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sput_char.S @@ -0,0 +1 @@ +%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"} diff --git a/runtime/interpreter/mterp/mips64/op_sput_object.S b/runtime/interpreter/mterp/mips64/op_sput_object.S new file mode 100644 index 0000000000..ef4c685116 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sput_object.S @@ -0,0 +1,11 @@ + .extern MterpSputObject + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + move a3, rSELF + jal MterpSputObject + beqzc v0, MterpException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_sput_short.S b/runtime/interpreter/mterp/mips64/op_sput_short.S new file mode 100644 index 0000000000..c4d195c82f --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sput_short.S @@ -0,0 +1 @@ +%include "mips64/op_sput.S" {"helper":"artSet16StaticFromCode"} diff --git a/runtime/interpreter/mterp/mips64/op_sput_wide.S b/runtime/interpreter/mterp/mips64/op_sput_wide.S new file mode 100644 index 0000000000..828ddc15e7 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sput_wide.S @@ -0,0 +1,18 @@ + /* + * SPUT_WIDE handler wrapper. 
+ * + */ + /* sput-wide vAA, field//BBBB */ + .extern artSet64IndirectStaticFromMterp + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + srl a2, rINST, 8 # a2 <- AA + dlsa a2, a2, rFP, 2 + move a3, rSELF + PREFETCH_INST 2 # Get next inst, but don't advance rPC + jal artSet64IndirectStaticFromMterp + bnezc v0, MterpException # 0 on success, -1 on failure + ADVANCE 2 # Past exception point - now advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/op_sub_double.S b/runtime/interpreter/mterp/mips64/op_sub_double.S new file mode 100644 index 0000000000..40a6c89a10 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sub_double.S @@ -0,0 +1 @@ +%include "mips64/fbinopWide.S" {"instr":"sub.d f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S new file mode 100644 index 0000000000..984737e553 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sub_double_2addr.S @@ -0,0 +1 @@ +%include "mips64/fbinopWide2addr.S" {"instr":"sub.d f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_sub_float.S b/runtime/interpreter/mterp/mips64/op_sub_float.S new file mode 100644 index 0000000000..9010592116 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sub_float.S @@ -0,0 +1 @@ +%include "mips64/fbinop.S" {"instr":"sub.s f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S new file mode 100644 index 0000000000..e7d4ffe1ae --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sub_float_2addr.S @@ -0,0 +1 @@ +%include "mips64/fbinop2addr.S" {"instr":"sub.s f0, f0, f1"} diff --git a/runtime/interpreter/mterp/mips64/op_sub_int.S b/runtime/interpreter/mterp/mips64/op_sub_int.S new file mode 100644 index 0000000000..609ea0575d --- /dev/null +++ 
b/runtime/interpreter/mterp/mips64/op_sub_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"subu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S new file mode 100644 index 0000000000..ba2f1e875b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sub_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"subu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_sub_long.S b/runtime/interpreter/mterp/mips64/op_sub_long.S new file mode 100644 index 0000000000..09a6afd26e --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sub_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"dsubu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S b/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S new file mode 100644 index 0000000000..b9ec82a19b --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_sub_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"dsubu a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_throw.S b/runtime/interpreter/mterp/mips64/op_throw.S new file mode 100644 index 0000000000..6418d57ecc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_throw.S @@ -0,0 +1,10 @@ + /* + * Throw an exception object in the current thread. 
+ */ + /* throw vAA */ + EXPORT_PC + srl a2, rINST, 8 # a2 <- AA + GET_VREG_U a0, a2 # a0 <- vAA (exception object) + beqzc a0, common_errNullObject + sd a0, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj + b MterpException diff --git a/runtime/interpreter/mterp/mips64/op_unused_3e.S b/runtime/interpreter/mterp/mips64/op_unused_3e.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_3e.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_3f.S b/runtime/interpreter/mterp/mips64/op_unused_3f.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_3f.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_40.S b/runtime/interpreter/mterp/mips64/op_unused_40.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_40.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_41.S b/runtime/interpreter/mterp/mips64/op_unused_41.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_41.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_42.S b/runtime/interpreter/mterp/mips64/op_unused_42.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_42.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_43.S b/runtime/interpreter/mterp/mips64/op_unused_43.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_43.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_79.S b/runtime/interpreter/mterp/mips64/op_unused_79.S new file mode 100644 index 
0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_79.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_7a.S b/runtime/interpreter/mterp/mips64/op_unused_7a.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_7a.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_f4.S b/runtime/interpreter/mterp/mips64/op_unused_f4.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_f4.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_fa.S b/runtime/interpreter/mterp/mips64/op_unused_fa.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_fa.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_fb.S b/runtime/interpreter/mterp/mips64/op_unused_fb.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_fb.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_fc.S b/runtime/interpreter/mterp/mips64/op_unused_fc.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_fc.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_fd.S b/runtime/interpreter/mterp/mips64/op_unused_fd.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_fd.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_fe.S b/runtime/interpreter/mterp/mips64/op_unused_fe.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_fe.S @@ -0,0 +1 
@@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_unused_ff.S b/runtime/interpreter/mterp/mips64/op_unused_ff.S new file mode 100644 index 0000000000..29463d73fc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_unused_ff.S @@ -0,0 +1 @@ +%include "mips64/unused.S" diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int.S b/runtime/interpreter/mterp/mips64/op_ushr_int.S new file mode 100644 index 0000000000..37c90cb7ec --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_ushr_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"srl a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S new file mode 100644 index 0000000000..d6bf4135dc --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_ushr_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"srl a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S b/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S new file mode 100644 index 0000000000..2a2d843c8a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_ushr_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"srl a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long.S b/runtime/interpreter/mterp/mips64/op_ushr_long.S new file mode 100644 index 0000000000..e724405f1f --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_ushr_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"dsrl a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S b/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S new file mode 100644 index 0000000000..d2cf135566 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_ushr_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"dsrl a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_xor_int.S b/runtime/interpreter/mterp/mips64/op_xor_int.S new file mode 100644 index 
0000000000..ee25ebc925 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_xor_int.S @@ -0,0 +1 @@ +%include "mips64/binop.S" {"instr":"xor a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S new file mode 100644 index 0000000000..0f0496729a --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_xor_int_2addr.S @@ -0,0 +1 @@ +%include "mips64/binop2addr.S" {"instr":"xor a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S new file mode 100644 index 0000000000..ecb21aee07 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_xor_int_lit16.S @@ -0,0 +1 @@ +%include "mips64/binopLit16.S" {"instr":"xor a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S b/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S new file mode 100644 index 0000000000..115ae99917 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_xor_int_lit8.S @@ -0,0 +1 @@ +%include "mips64/binopLit8.S" {"instr":"xor a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_xor_long.S b/runtime/interpreter/mterp/mips64/op_xor_long.S new file mode 100644 index 0000000000..7ebabc2710 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_xor_long.S @@ -0,0 +1 @@ +%include "mips64/binopWide.S" {"instr":"xor a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S b/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S new file mode 100644 index 0000000000..0f1919a21e --- /dev/null +++ b/runtime/interpreter/mterp/mips64/op_xor_long_2addr.S @@ -0,0 +1 @@ +%include "mips64/binopWide2addr.S" {"instr":"xor a0, a0, a1"} diff --git a/runtime/interpreter/mterp/mips64/unop.S b/runtime/interpreter/mterp/mips64/unop.S new file mode 100644 index 0000000000..e3f7ea0eda --- /dev/null +++ b/runtime/interpreter/mterp/mips64/unop.S @@ -0,0 +1,18 @@ +%default {"preinstr":""} + /* + * Generic 32-bit unary 
operation. Provide an "instr" line that + * specifies an instruction that performs "a0 = op a0". + * + * for: int-to-byte, int-to-char, int-to-short, + * not-int, neg-int + */ + /* unop vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + ext a2, rINST, 8, 4 # a2 <- A + $preinstr # optional op + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + $instr # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/unopWide.S b/runtime/interpreter/mterp/mips64/unopWide.S new file mode 100644 index 0000000000..c0dd1aa1d3 --- /dev/null +++ b/runtime/interpreter/mterp/mips64/unopWide.S @@ -0,0 +1,17 @@ +%default {"preinstr":""} + /* + * Generic 64-bit unary operation. Provide an "instr" line that + * specifies an instruction that performs "a0 = op a0". + * + * For: not-long, neg-long + */ + /* unop vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a3 # a0 <- vB + ext a2, rINST, 8, 4 # a2 <- A + $preinstr # optional op + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + $instr # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mips64/unused.S b/runtime/interpreter/mterp/mips64/unused.S new file mode 100644 index 0000000000..30d38bd6cd --- /dev/null +++ b/runtime/interpreter/mterp/mips64/unused.S @@ -0,0 +1,4 @@ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback diff --git a/runtime/interpreter/mterp/mips64/zcmp.S b/runtime/interpreter/mterp/mips64/zcmp.S new file mode 100644 index 0000000000..0e0477fadf --- /dev/null +++ b/runtime/interpreter/mterp/mips64/zcmp.S @@ -0,0 +1,30 @@ + /* + * Generic one-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-lez" you would use "le". 
+ * + * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez + */ + /* if-cmp vAA, +BBBB */ + .extern MterpProfileBranch + srl a2, rINST, 8 # a2 <- AA + lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB) + GET_VREG a0, a2 # a0 <- vAA + b${condition}zc a0, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc index e1bde1ba01..10b19c5f4f 100644 --- a/runtime/interpreter/mterp/mterp.cc +++ b/runtime/interpreter/mterp/mterp.cc @@ -147,15 +147,7 @@ extern "C" bool MterpShouldSwitchInterpreters() SHARED_REQUIRES(Locks::mutator_lock_) { const instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation(); - bool unhandled_instrumentation; - // TODO: enable for other targets after more extensive testing. 
- if ((kRuntimeISA == kArm64) || (kRuntimeISA == kArm) || - (kRuntimeISA == kX86) || (kRuntimeISA == kMips)) { - unhandled_instrumentation = instrumentation->NonJitProfilingActive(); - } else { - unhandled_instrumentation = instrumentation->IsActive(); - } - return unhandled_instrumentation || Dbg::IsDebuggerActive(); + return instrumentation->NonJitProfilingActive() || Dbg::IsDebuggerActive(); } diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S index 2b74d4c86e..092474d544 100644 --- a/runtime/interpreter/mterp/out/mterp_arm.S +++ b/runtime/interpreter/mterp/out/mterp_arm.S @@ -343,8 +343,8 @@ ExecuteMterpImpl: /* set up "named" registers */ mov rSELF, r0 ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET] - add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to insns[] (i.e. - the dalivk byte code). - add rREFS, rFP, r0, lsl #2 @ point to reference array in shadow frame + add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs. + VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc. add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[] add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode @@ -435,8 +435,8 @@ artMterpAsmInstructionStart = .L_op_nop /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] - add r2, rFP, rINST, lsl #2 @ r2<- &fp[A] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A] ldmia r3, {r0-r1} @ r0/r1<- fp[B] CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @@ -452,8 +452,8 @@ artMterpAsmInstructionStart = .L_op_nop /* NOTE: regs can overlap, e.g. 
"move v6,v7" or "move v7,v6" */ FETCH r3, 1 @ r3<- BBBB mov rINST, rINST, lsr #8 @ rINST<- AA - add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] - add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB] + VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA] ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs FETCH_ADVANCE_INST 2 @ advance rPC, load rINST @@ -469,8 +469,8 @@ artMterpAsmInstructionStart = .L_op_nop /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ FETCH r3, 2 @ r3<- BBBB FETCH r2, 1 @ r2<- AAAA - add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB] - add lr, rFP, r2, lsl #2 @ r2<- &fp[AAAA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB] + VREG_INDEX_TO_ADDR lr, r2 @ r2<- &fp[AAAA] ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB] FETCH_ADVANCE_INST 3 @ advance rPC, load rINST CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs @@ -563,7 +563,7 @@ artMterpAsmInstructionStart = .L_op_nop /* move-result-wide vAA */ mov rINST, rINST, lsr #8 @ rINST<- AA ldr r3, [rFP, #OFF_FP_RESULT_REGISTER] - add r2, rFP, rINST, lsl #2 @ r2<- &fp[AA] + VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA] ldmia r3, {r0-r1} @ r0/r1<- retval.j CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @@ -655,7 +655,7 @@ artMterpAsmInstructionStart = .L_op_nop ands lr, #(THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) blne MterpSuspendCheck @ (self) mov r2, rINST, lsr #8 @ r2<- AA - add r2, rFP, r2, lsl #2 @ r2<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA] ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1 b MterpReturn @@ -687,10 +687,9 @@ artMterpAsmInstructionStart = .L_op_nop .L_op_const_4: /* 0x12 */ /* File: arm/op_const_4.S */ /* const/4 vA, #+B */ - mov r1, rINST, lsl #16 @ r1<- Bxxx0000 + sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended) ubfx r0, rINST, #8, #4 @ r0<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended) 
GET_INST_OPCODE ip @ ip<- opcode from rINST SET_VREG r1, r0 @ fp[A]<- r1 GOTO_OPCODE ip @ execute next instruction @@ -700,7 +699,7 @@ artMterpAsmInstructionStart = .L_op_nop .L_op_const_16: /* 0x13 */ /* File: arm/op_const_16.S */ /* const/16 vAA, #+BBBB */ - FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended + FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended) mov r3, rINST, lsr #8 @ r3<- AA FETCH_ADVANCE_INST 2 @ advance rPC, load rINST SET_VREG r0, r3 @ vAA<- r0 @@ -713,8 +712,8 @@ artMterpAsmInstructionStart = .L_op_nop /* File: arm/op_const.S */ /* const vAA, #+BBBBbbbb */ mov r3, rINST, lsr #8 @ r3<- AA - FETCH r0, 1 @ r0<- bbbb (low - FETCH r1, 2 @ r1<- BBBB (high + FETCH r0, 1 @ r0<- bbbb (low) + FETCH r1, 2 @ r1<- BBBB (high) FETCH_ADVANCE_INST 3 @ advance rPC, load rINST orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb GET_INST_OPCODE ip @ extract opcode from rINST @@ -726,7 +725,7 @@ artMterpAsmInstructionStart = .L_op_nop .L_op_const_high16: /* 0x15 */ /* File: arm/op_const_high16.S */ /* const/high16 vAA, #+BBBB0000 */ - FETCH r0, 1 @ r0<- 0000BBBB (zero-extended + FETCH r0, 1 @ r0<- 0000BBBB (zero-extended) mov r3, rINST, lsr #8 @ r3<- AA mov r0, r0, lsl #16 @ r0<- BBBB0000 FETCH_ADVANCE_INST 2 @ advance rPC, load rINST @@ -739,12 +738,12 @@ artMterpAsmInstructionStart = .L_op_nop .L_op_const_wide_16: /* 0x16 */ /* File: arm/op_const_wide_16.S */ /* const-wide/16 vAA, #+BBBB */ - FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended + FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended) mov r3, rINST, lsr #8 @ r3<- AA mov r1, r0, asr #31 @ r1<- ssssssss FETCH_ADVANCE_INST 2 @ advance rPC, load rINST CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs - add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA] GET_INST_OPCODE ip @ extract opcode from rINST stmia r3, {r0-r1} @ vAA<- r0/r1 GOTO_OPCODE ip @ jump to next instruction @@ -760,7 +759,7 @@ artMterpAsmInstructionStart = .L_op_nop FETCH_ADVANCE_INST 3 @ advance rPC, load rINST orr r0, r0, r2, lsl #16 @ 
r0<- BBBBbbbb CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs - add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA] mov r1, r0, asr #31 @ r1<- ssssssss GET_INST_OPCODE ip @ extract opcode from rINST stmia r3, {r0-r1} @ vAA<- r0/r1 @@ -780,7 +779,7 @@ artMterpAsmInstructionStart = .L_op_nop orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word) CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs FETCH_ADVANCE_INST 5 @ advance rPC, load rINST - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] GET_INST_OPCODE ip @ extract opcode from rINST stmia r9, {r0-r1} @ vAA<- r0/r1 GOTO_OPCODE ip @ jump to next instruction @@ -796,7 +795,7 @@ artMterpAsmInstructionStart = .L_op_nop mov r1, r1, lsl #16 @ r1<- BBBB0000 FETCH_ADVANCE_INST 2 @ advance rPC, load rINST CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs - add r3, rFP, r3, lsl #2 @ r3<- &fp[AA] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA] GET_INST_OPCODE ip @ extract opcode from rINST stmia r3, {r0-r1} @ vAA<- r0/r1 GOTO_OPCODE ip @ jump to next instruction @@ -825,8 +824,8 @@ artMterpAsmInstructionStart = .L_op_nop /* File: arm/op_const_string_jumbo.S */ /* const/string vAA, String@BBBBBBBB */ EXPORT_PC - FETCH r0, 1 @ r0<- bbbb (low - FETCH r2, 2 @ r2<- BBBB (high + FETCH r0, 1 @ r0<- bbbb (low) + FETCH r2, 2 @ r2<- BBBB (high) mov r1, rINST, lsr #8 @ r1<- AA orr r0, r0, r2, lsl #16 @ r1<- BBBBbbbb add r2, rFP, #OFF_FP_SHADOWFRAME @@ -938,10 +937,9 @@ artMterpAsmInstructionStart = .L_op_nop VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method mov r3, rSELF @ r3<- self - mov r9, rINST, lsr #8 @ r9<- A+ - and r9, r9, #15 @ r9<- A bl MterpInstanceOf @ (index, &obj, method, self) ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET] + ubfx r9, rINST, #8, #4 @ r9<- A PREFETCH_INST 2 cmp r1, #0 @ exception pending? 
bne MterpException @@ -1353,7 +1351,7 @@ artMterpAsmInstructionStart = .L_op_nop VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC flds s0, [r2] @ s0<- vBB flds s1, [r3] @ s1<- vCC - fcmpes s0, s1 @ compare (vBB, vCC) + vcmpe.f32 s0, s1 @ compare (vBB, vCC) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST mvn r0, #0 @ r0<- -1 (default) GET_INST_OPCODE ip @ extract opcode from rINST @@ -1392,7 +1390,7 @@ artMterpAsmInstructionStart = .L_op_nop VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC flds s0, [r2] @ s0<- vBB flds s1, [r3] @ s1<- vCC - fcmpes s0, s1 @ compare (vBB, vCC) + vcmpe.f32 s0, s1 @ compare (vBB, vCC) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST mov r0, #1 @ r0<- 1 (default) GET_INST_OPCODE ip @ extract opcode from rINST @@ -1431,7 +1429,7 @@ artMterpAsmInstructionStart = .L_op_nop VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC fldd d0, [r2] @ d0<- vBB fldd d1, [r3] @ d1<- vCC - fcmped d0, d1 @ compare (vBB, vCC) + vcmpe.f64 d0, d1 @ compare (vBB, vCC) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST mvn r0, #0 @ r0<- -1 (default) GET_INST_OPCODE ip @ extract opcode from rINST @@ -1470,7 +1468,7 @@ artMterpAsmInstructionStart = .L_op_nop VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC fldd d0, [r2] @ d0<- vBB fldd d1, [r3] @ d1<- vCC - fcmped d0, d1 @ compare (vBB, vCC) + vcmpe.f64 d0, d1 @ compare (vBB, vCC) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST mov r0, #1 @ r0<- 1 (default) GET_INST_OPCODE ip @ extract opcode from rINST @@ -1509,8 +1507,8 @@ artMterpAsmInstructionStart = .L_op_nop mov r9, rINST, lsr #8 @ r9<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 cmp r1, r3 @ compare (vBB+1, vCC+1) @@ -2089,7 +2087,7 @@ artMterpAsmInstructionStart = .L_op_nop bcs common_errArrayIndex @ index >= length, bail FETCH_ADVANCE_INST 2 @ advance 
rPC, load rINST ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC] - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] GET_INST_OPCODE ip @ extract opcode from rINST stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 GOTO_OPCODE ip @ jump to next instruction @@ -2314,7 +2312,7 @@ artMterpAsmInstructionStart = .L_op_nop ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width cmp r1, r3 @ compare unsigned index, length - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] bcs common_errArrayIndex @ index >= length, bail FETCH_ADVANCE_INST 2 @ advance rPC, load rINST ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 @@ -2533,7 +2531,7 @@ artMterpAsmInstructionStart = .L_op_nop cmp r3, #0 bne MterpException @ bail out CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs - add r3, rFP, r2, lsl #2 @ r3<- &fp[A] + VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A] stmia r3, {r0-r1} @ fp[A]<- r0/r1 ADVANCE 2 GET_INST_OPCODE ip @ extract opcode from rINST @@ -2736,7 +2734,7 @@ artMterpAsmInstructionStart = .L_op_nop mov r1, rINST, lsr #12 @ r1<- B GET_VREG r1, r1 @ r1<- fp[B], the object pointer ubfx r2, rINST, #8, #4 @ r2<- A - add r2, rFP, r2, lsl #2 @ r2<- &fp[A] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[A] ldr r3, [rFP, #OFF_FP_METHOD] @ r3<- referrer PREFETCH_INST 2 bl artSet64InstanceFromMterp @@ -2923,7 +2921,7 @@ artMterpAsmInstructionStart = .L_op_nop bl artGet64StaticFromCode ldr r3, [rSELF, #THREAD_EXCEPTION_OFFSET] mov r9, rINST, lsr #8 @ r9<- AA - add lr, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR lr, r9 @ r9<- &fp[AA] cmp r3, #0 @ Fail to resolve? 
bne MterpException @ bail out FETCH_ADVANCE_INST 2 @ advance rPC, load rINST @@ -3135,7 +3133,7 @@ artMterpAsmInstructionStart = .L_op_nop FETCH r0, 1 @ r0<- field ref BBBB ldr r1, [rFP, #OFF_FP_METHOD] mov r2, rINST, lsr #8 @ r3<- AA - add r2, rFP, r2, lsl #2 + VREG_INDEX_TO_ADDR r2, r2 mov r3, rSELF PREFETCH_INST 2 @ Get next inst, but don't advance rPC bl artSet64IndirectStaticFromMterp @@ -3668,8 +3666,8 @@ artMterpAsmInstructionStart = .L_op_nop /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r3, {r0-r1} @ r0/r1<- vAA CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @@ -3696,8 +3694,8 @@ artMterpAsmInstructionStart = .L_op_nop /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r3, {r0-r1} @ r0/r1<- vAA CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @@ -3750,8 +3748,8 @@ artMterpAsmInstructionStart = .L_op_nop /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r3, {r0-r1} @ r0/r1<- vAA CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @@ -3779,7 +3777,7 @@ artMterpAsmInstructionStart = .L_op_nop mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A GET_VREG r0, r3 @ r0<- vB - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] @ optional 
op; may set condition codes CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @@ -3803,11 +3801,10 @@ artMterpAsmInstructionStart = .L_op_nop */ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB flds s0, [r3] @ s0<- vB + ubfx r9, rINST, #8, #4 @ r9<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - and r9, r9, #15 @ r9<- A fsitos s1, s0 @ s1<- op GET_INST_OPCODE ip @ extract opcode from rINST VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA @@ -3828,11 +3825,10 @@ artMterpAsmInstructionStart = .L_op_nop */ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB flds s0, [r3] @ s0<- vB + ubfx r9, rINST, #8, #4 @ r9<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - and r9, r9, #15 @ r9<- A fsitod d0, s0 @ d0<- op CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs GET_INST_OPCODE ip @ extract opcode from rINST @@ -3880,7 +3876,7 @@ artMterpAsmInstructionStart = .L_op_nop /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B ubfx r9, rINST, #8, #4 @ r9<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @ optional op; may set condition codes @@ -3904,8 +3900,8 @@ artMterpAsmInstructionStart = .L_op_nop */ mov r3, rINST, lsr #12 @ r3<- B ubfx r9, rINST, #8, #4 @ r9<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] - add r9, rFP, r9, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] vldr d0, [r3] @ d0<- vAA FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @@ -3935,11 +3931,10 @@ constvalop_long_to_double: */ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB flds s0, [r3] @ s0<- vB + ubfx r9, rINST, #8, #4 @ r9<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - and r9, r9, 
#15 @ r9<- A ftosizs s1, s0 @ s1<- op GET_INST_OPCODE ip @ extract opcode from rINST VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA @@ -3964,7 +3959,7 @@ constvalop_long_to_double: mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A GET_VREG r0, r3 @ r0<- vB - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] @ optional op; may set condition codes CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @@ -3989,12 +3984,11 @@ constvalop_long_to_double: */ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB flds s0, [r3] @ s0<- vB + ubfx r9, rINST, #8, #4 @ r9<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - and r9, r9, #15 @ r9<- A - fcvtds d0, s0 @ d0<- op + vcvt.f64.f32 d0, s0 @ d0<- op CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs GET_INST_OPCODE ip @ extract opcode from rINST VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA @@ -4015,11 +4009,10 @@ constvalop_long_to_double: */ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB fldd d0, [r3] @ d0<- vB + ubfx r9, rINST, #8, #4 @ r9<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - and r9, r9, #15 @ r9<- A ftosizd s0, d0 @ s0<- op GET_INST_OPCODE ip @ extract opcode from rINST VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA @@ -4043,8 +4036,8 @@ constvalop_long_to_double: /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r3, rFP, r3, lsl #2 @ r3<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r3, {r0-r1} @ r0/r1<- vAA CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs FETCH_ADVANCE_INST 1 @ advance rPC, load rINST @@ -4070,12 +4063,11 @@ constvalop_long_to_double: */ /* unop vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ VREG_INDEX_TO_ADDR 
r3, r3 @ r3<- &vB fldd d0, [r3] @ d0<- vB + ubfx r9, rINST, #8, #4 @ r9<- A FETCH_ADVANCE_INST 1 @ advance rPC, load rINST - and r9, r9, #15 @ r9<- A - fcvtsd s0, d0 @ s0<- op + vcvt.f32.f64 s0, d0 @ s0<- op GET_INST_OPCODE ip @ extract opcode from rINST VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA fsts s0, [r9] @ vA<- s0 @@ -4626,9 +4618,9 @@ constvalop_long_to_double: mov rINST, rINST, lsr #8 @ rINST<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA] - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 .if 0 @@ -4670,9 +4662,9 @@ constvalop_long_to_double: mov rINST, rINST, lsr #8 @ rINST<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA] - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 .if 0 @@ -4715,8 +4707,8 @@ constvalop_long_to_double: FETCH r0, 1 @ r0<- CCBB and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 mul ip, r2, r1 @ ip<- ZxW @@ -4724,7 +4716,7 @@ constvalop_long_to_double: mla r2, r0, r3, ip @ r2<- YxX + (ZxW) mov r0, rINST, lsr #8 @ r0<- AA add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) - add r0, rFP, r0, lsl #2 @ r0<- &fp[AA] + VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA] FETCH_ADVANCE_INST 2 @ advance rPC, load rINST GET_INST_OPCODE ip @ 
extract opcode from rINST stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 @@ -4755,9 +4747,9 @@ constvalop_long_to_double: mov rINST, rINST, lsr #8 @ rINST<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA] - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 .if 1 @@ -4800,9 +4792,9 @@ constvalop_long_to_double: mov rINST, rINST, lsr #8 @ rINST<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA] - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 .if 1 @@ -4844,9 +4836,9 @@ constvalop_long_to_double: mov rINST, rINST, lsr #8 @ rINST<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA] - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 .if 0 @@ -4888,9 +4880,9 @@ constvalop_long_to_double: mov rINST, rINST, lsr #8 @ rINST<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA] - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 .if 0 
@@ -4932,9 +4924,9 @@ constvalop_long_to_double: mov rINST, rINST, lsr #8 @ rINST<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA] - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 .if 0 @@ -4966,12 +4958,12 @@ constvalop_long_to_double: mov r9, rINST, lsr #8 @ r9<- AA and r3, r0, #255 @ r3<- BB mov r0, r0, lsr #8 @ r0<- CC - add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB] GET_VREG r2, r0 @ r2<- vCC ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs and r2, r2, #63 @ r2<- r2 & 0x3f - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] mov r1, r1, asl r2 @ r1<- r1 << r2 rsb r3, r2, #32 @ r3<- 32 - r2 orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2)) @@ -4998,12 +4990,12 @@ constvalop_long_to_double: mov r9, rINST, lsr #8 @ r9<- AA and r3, r0, #255 @ r3<- BB mov r0, r0, lsr #8 @ r0<- CC - add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB] GET_VREG r2, r0 @ r2<- vCC ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs and r2, r2, #63 @ r0<- r0 & 0x3f - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] mov r0, r0, lsr r2 @ r0<- r2 >> r2 rsb r3, r2, #32 @ r3<- 32 - r2 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) @@ -5030,12 +5022,12 @@ constvalop_long_to_double: mov r9, rINST, lsr #8 @ r9<- AA and r3, r0, #255 @ r3<- BB mov r0, r0, lsr #8 @ r0<- CC - add r3, rFP, r3, lsl #2 @ r3<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB] GET_VREG r2, r0 @ r2<- vCC ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1 CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs and r2, r2, #63 
@ r0<- r0 & 0x3f - add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA] mov r0, r0, lsr r2 @ r0<- r2 >> r2 rsb r3, r2, #32 @ r3<- 32 - r2 orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2)) @@ -5355,9 +5347,9 @@ constvalop_long_to_double: mov rINST, rINST, lsr #8 @ rINST<- AA and r2, r0, #255 @ r2<- BB mov r3, r0, lsr #8 @ r3<- CC - add r9, rFP, rINST, lsl #2 @ r9<- &fp[AA] - add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] - add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA] + VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB] + VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC] ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 .if 0 @@ -5808,8 +5800,8 @@ constvalop_long_to_double: /* binop/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 .if 0 @@ -5848,8 +5840,8 @@ constvalop_long_to_double: /* binop/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 .if 0 @@ -5881,8 +5873,8 @@ constvalop_long_to_double: /* mul-long/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx r9, rINST, #8, #4 @ r9<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1 mul ip, r2, r1 @ ip<- ZxW @@ -5917,8 +5909,8 @@ constvalop_long_to_double: /* binop/2addr vA, vB */ mov r1, 
rINST, lsr #12 @ r1<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 .if 1 @@ -5958,8 +5950,8 @@ constvalop_long_to_double: /* binop/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 .if 1 @@ -5998,8 +5990,8 @@ constvalop_long_to_double: /* binop/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 .if 0 @@ -6038,8 +6030,8 @@ constvalop_long_to_double: /* binop/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 .if 0 @@ -6078,8 +6070,8 @@ constvalop_long_to_double: /* binop/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 .if 0 @@ -6109,7 +6101,7 @@ constvalop_long_to_double: ubfx r9, rINST, #8, #4 @ r9<- A GET_VREG r2, r3 @ 
r2<- vB CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs - add r9, rFP, r9, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] and r2, r2, #63 @ r2<- r2 & 0x3f ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 mov r1, r1, asl r2 @ r1<- r1 << r2 @@ -6136,7 +6128,7 @@ constvalop_long_to_double: ubfx r9, rINST, #8, #4 @ r9<- A GET_VREG r2, r3 @ r2<- vB CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs - add r9, rFP, r9, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] and r2, r2, #63 @ r2<- r2 & 0x3f ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 mov r0, r0, lsr r2 @ r0<- r2 >> r2 @@ -6163,7 +6155,7 @@ constvalop_long_to_double: ubfx r9, rINST, #8, #4 @ r9<- A GET_VREG r2, r3 @ r2<- vB CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs - add r9, rFP, r9, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A] and r2, r2, #63 @ r2<- r2 & 0x3f ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 mov r0, r0, lsr r2 @ r0<- r2 >> r2 @@ -6191,14 +6183,12 @@ constvalop_long_to_double: */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - flds s1, [r3] @ s1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA + flds s1, [r3] @ s1<- vB FETCH_ADVANCE_INST 1 @ advance rPC, load rINST flds s0, [r9] @ s0<- vA - fadds s2, s0, s1 @ s2<- op GET_INST_OPCODE ip @ extract opcode from rINST fsts s2, [r9] @ vAA<- s2 @@ -6219,14 +6209,12 @@ constvalop_long_to_double: */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - flds s1, [r3] @ s1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA + flds s1, [r3] @ s1<- vB FETCH_ADVANCE_INST 1 @ advance rPC, load rINST flds s0, [r9] @ s0<- vA - fsubs s2, s0, s1 @ s2<- op GET_INST_OPCODE ip @ extract opcode from rINST fsts s2, [r9] @ vAA<- s2 @@ -6247,14 +6235,12 @@ 
constvalop_long_to_double: */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - flds s1, [r3] @ s1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA + flds s1, [r3] @ s1<- vB FETCH_ADVANCE_INST 1 @ advance rPC, load rINST flds s0, [r9] @ s0<- vA - fmuls s2, s0, s1 @ s2<- op GET_INST_OPCODE ip @ extract opcode from rINST fsts s2, [r9] @ vAA<- s2 @@ -6275,14 +6261,12 @@ constvalop_long_to_double: */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - flds s1, [r3] @ s1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA + flds s1, [r3] @ s1<- vB FETCH_ADVANCE_INST 1 @ advance rPC, load rINST flds s0, [r9] @ s0<- vA - fdivs s2, s0, s1 @ s2<- op GET_INST_OPCODE ip @ extract opcode from rINST fsts s2, [r9] @ vAA<- s2 @@ -6343,11 +6327,10 @@ constvalop_long_to_double: */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - fldd d1, [r3] @ d1<- vB CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs + fldd d1, [r3] @ d1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA FETCH_ADVANCE_INST 1 @ advance rPC, load rINST fldd d0, [r9] @ d0<- vA @@ -6372,11 +6355,10 @@ constvalop_long_to_double: */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - fldd d1, [r3] @ d1<- vB CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs + fldd d1, [r3] @ d1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA FETCH_ADVANCE_INST 1 @ advance rPC, load rINST fldd d0, [r9] @ d0<- vA @@ -6401,11 +6383,10 @@ constvalop_long_to_double: */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ 
r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - fldd d1, [r3] @ d1<- vB CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs + fldd d1, [r3] @ d1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA FETCH_ADVANCE_INST 1 @ advance rPC, load rINST fldd d0, [r9] @ d0<- vA @@ -6430,11 +6411,10 @@ constvalop_long_to_double: */ /* binop/2addr vA, vB */ mov r3, rINST, lsr #12 @ r3<- B - mov r9, rINST, lsr #8 @ r9<- A+ + ubfx r9, rINST, #8, #4 @ r9<- A VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB - and r9, r9, #15 @ r9<- A - fldd d1, [r3] @ d1<- vB CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs + fldd d1, [r3] @ d1<- vB VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA FETCH_ADVANCE_INST 1 @ advance rPC, load rINST fldd d0, [r9] @ d0<- vA @@ -6467,8 +6447,8 @@ constvalop_long_to_double: /* binop/2addr vA, vB */ mov r1, rINST, lsr #12 @ r1<- B ubfx rINST, rINST, #8, #4 @ rINST<- A - add r1, rFP, r1, lsl #2 @ r1<- &fp[B] - add r9, rFP, rINST, lsl #2 @ r9<- &fp[A] + VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B] + VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A] ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 .if 0 @@ -6783,7 +6763,7 @@ constvalop_long_to_double: * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB @@ -6821,7 +6801,7 @@ constvalop_long_to_double: * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB @@ -6860,7 +6840,7 @@ constvalop_long_to_double: * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- 
ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB @@ -6967,7 +6947,7 @@ constvalop_long_to_double: * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB @@ -7005,7 +6985,7 @@ constvalop_long_to_double: * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB @@ -7043,7 +7023,7 @@ constvalop_long_to_double: * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB @@ -7081,7 +7061,7 @@ constvalop_long_to_double: * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB @@ -7119,7 +7099,7 @@ constvalop_long_to_double: * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB @@ -7157,7 +7137,7 @@ constvalop_long_to_double: * shl-int/lit8, shr-int/lit8, ushr-int/lit8 */ /* binop/lit8 vAA, vBB, #+CC */ - FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC + FETCH_S r3, 1 @ r3<- 
ssssCCBB (sign-extended for CC) mov r9, rINST, lsr #8 @ r9<- AA and r2, r3, #255 @ r2<- BB GET_VREG r0, r2 @ r0<- vBB @@ -7207,7 +7187,7 @@ constvalop_long_to_double: beq common_errNullObject @ object was null ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned) FETCH_ADVANCE_INST 2 @ advance rPC, load rINST - add r3, rFP, r2, lsl #2 @ r3<- &fp[A] + VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A] CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs GET_INST_OPCODE ip @ extract opcode from rINST stmia r3, {r0-r1} @ fp[A]<- r0/r1 @@ -7263,7 +7243,7 @@ constvalop_long_to_double: ubfx r0, rINST, #8, #4 @ r0<- A cmp r2, #0 @ check object for null beq common_errNullObject @ object was null - add r0, rFP, r0, lsl #2 @ r0<- &fp[A] + VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A] ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1] FETCH_ADVANCE_INST 2 @ advance rPC, load rINST strd r0, [r2, r3] @ obj.field<- r0/r1 @@ -7673,7 +7653,7 @@ f2l_doconv: cmp r0, #0 @ nonzero == yes mvnne r0, #0 @ return maxlong (7fffffff) mvnne r1, #0x80000000 - ldmnefd sp!, {r4, pc} + popne {r4, pc} mov r0, r4 @ recover arg mov r1, #0xdf000000 @ (float)minlong @@ -7681,14 +7661,14 @@ f2l_doconv: cmp r0, #0 @ nonzero == yes movne r0, #0 @ return minlong (80000000) movne r1, #0x80000000 - ldmnefd sp!, {r4, pc} + popne {r4, pc} mov r0, r4 @ recover arg mov r1, r4 bl __aeabi_fcmpeq @ is arg == self? cmp r0, #0 @ zero == no moveq r1, #0 @ return zero for NaN - ldmeqfd sp!, {r4, pc} + popeq {r4, pc} mov r0, r4 @ recover arg bl __aeabi_f2lz @ convert float to long diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S index c7c0fb5b79..cdb27e89e6 100644 --- a/runtime/interpreter/mterp/out/mterp_arm64.S +++ b/runtime/interpreter/mterp/out/mterp_arm64.S @@ -279,7 +279,7 @@ codes. * Convert a virtual register index into an address. 
*/ .macro VREG_INDEX_TO_ADDR reg, vreg - add \reg, xFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */ + add \reg, xFP, \vreg, lsl #2 /* WARNING: handle shadow frame vreg zero if store */ .endm /* @@ -338,7 +338,7 @@ ExecuteMterpImpl: /* set up "named" registers */ mov xSELF, x0 ldr w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET] - add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to insns[] (i.e. - the dalivk byte code). + add xFP, x2, #SHADOWFRAME_VREGS_OFFSET // point to vregs. add xREFS, xFP, w0, lsl #2 // point to reference array in shadow frame ldr w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET] // Get starting dex_pc. add xPC, x1, #CODEITEM_INSNS_OFFSET // Point to base of insns[] @@ -2552,7 +2552,7 @@ artMterpAsmInstructionStart = .L_op_nop lsr w1, wINST, #12 // w1<- B GET_VREG w1, w1 // w1<- fp[B], the object pointer ubfx w2, wINST, #8, #4 // w2<- A - add x2, xFP, x2, lsl #2 // w2<- &fp[A] + VREG_INDEX_TO_ADDR x2, x2 // w2<- &fp[A] ldr x3, [xFP, #OFF_FP_METHOD] // w3<- referrer PREFETCH_INST 2 bl artSet64InstanceFromMterp @@ -2941,7 +2941,7 @@ artMterpAsmInstructionStart = .L_op_nop FETCH w0, 1 // w0<- field ref BBBB ldr x1, [xFP, #OFF_FP_METHOD] lsr w2, wINST, #8 // w3<- AA - add x2, xFP, w2, lsl #2 + VREG_INDEX_TO_ADDR x2, w2 mov x3, xSELF PREFETCH_INST 2 // Get next inst, but don't advance rPC bl artSet64IndirectStaticFromMterp diff --git a/runtime/interpreter/mterp/out/mterp_mips.S b/runtime/interpreter/mterp/out/mterp_mips.S index 7ae1ab110d..b134129a5a 100644 --- a/runtime/interpreter/mterp/out/mterp_mips.S +++ b/runtime/interpreter/mterp/out/mterp_mips.S @@ -542,7 +542,7 @@ ExecuteMterpImpl: /* set up "named" registers */ move rSELF, a0 lw a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2) - addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to insns[] (i.e. - the dalivk byte code). + addu rFP, a2, SHADOWFRAME_VREGS_OFFSET # point to vregs. 
EAS2(rREFS, rFP, a0) # point to reference array in shadow frame lw a0, SHADOWFRAME_DEX_PC_OFFSET(a2) # Get starting dex_pc addu rPC, a1, CODEITEM_INSNS_OFFSET # Point to base of insns[] @@ -4373,8 +4373,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4412,8 +4412,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4451,8 +4451,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4491,8 +4491,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. 
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4526,8 +4526,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4567,8 +4567,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4602,8 +4602,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4642,8 +4642,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. 
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4681,8 +4681,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4720,8 +4720,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4759,8 +4759,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4798,8 +4798,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. 
+ * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int @@ -4837,8 +4837,8 @@ artMterpAsmInstructionStart = .L_op_nop * * If "chkzero" is set to 1, we perform a divide-by-zero check on * vCC (a1). Useful for integer division and modulus. Note that we - * *don't* check for (INT_MIN / -1) here, because the ARM math lib - * handles it correctly. + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. * * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, * xor-int, shl-int, shr-int, ushr-int diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S new file mode 100644 index 0000000000..a17252b2f8 --- /dev/null +++ b/runtime/interpreter/mterp/out/mterp_mips64.S @@ -0,0 +1,12425 @@ +/* + * This file was generated automatically by gen-mterp.py for 'mips64'. + * + * --> DO NOT EDIT <-- + */ + +/* File: mips64/header.S */ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <machine/regdef.h> + +/* TODO: add the missing file and use its FP register definitions. 
*/ +/* #include <machine/fpregdef.h> */ +/* FP register definitions */ +#define f0 $f0 +#define f1 $f1 +#define f2 $f2 +#define f3 $f3 +#define f12 $f12 +#define f13 $f13 + +/* + * It looks like the GNU assembler currently does not support the blec and bgtc + * idioms, which should translate into bgec and bltc respectively with swapped + * left and right register operands. + * TODO: remove these macros when the assembler is fixed. + */ +.macro blec lreg, rreg, target + bgec \rreg, \lreg, \target +.endm +.macro bgtc lreg, rreg, target + bltc \rreg, \lreg, \target +.endm + +/* +Mterp and MIPS64 notes: + +The following registers have fixed assignments: + + reg nick purpose + s0 rPC interpreted program counter, used for fetching instructions + s1 rFP interpreted frame pointer, used for accessing locals and args + s2 rSELF self (Thread) pointer + s3 rINST first 16-bit code unit of current instruction + s4 rIBASE interpreted instruction base pointer, used for computed goto + s5 rREFS base of object references in shadow frame (ideally, we'll get rid of this later). +*/ + +/* During bringup, we'll use the shadow frame model instead of rFP */ +/* single-purpose registers, given names for clarity */ +#define rPC s0 +#define rFP s1 +#define rSELF s2 +#define rINST s3 +#define rIBASE s4 +#define rREFS s5 + +/* + * This is a #include, not a %include, because we want the C pre-processor + * to expand the macros into assembler assignment statements. + */ +#include "asm_support.h" + +/* + * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So, + * to access other shadow frame fields, we need to use a backwards offset. Define those here. 
+ */ +#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET) +#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET) +#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET) +#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET) +#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET) +#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET) +#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET) +#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET) +#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET) + +#define MTERP_PROFILE_BRANCHES 1 +#define MTERP_LOGGING 0 + +/* + * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must + * be done *before* something throws. + * + * It's okay to do this more than once. + * + * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped + * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction + * offset into the code_items_[] array. For effiency, we will "export" the + * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC + * to convert to a dex pc when needed. + */ +.macro EXPORT_PC + sd rPC, OFF_FP_DEX_PC_PTR(rFP) +.endm + +/* + * Refresh handler table. + */ +.macro REFRESH_IBASE + ld rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF) +.endm + +/* + * Fetch the next instruction from rPC into rINST. Does not advance rPC. + */ +.macro FETCH_INST + lhu rINST, 0(rPC) +.endm + +/* Advance rPC by some number of code units. */ +.macro ADVANCE count + daddu rPC, rPC, (\count) * 2 +.endm + +/* + * Fetch the next instruction from the specified offset. Advances rPC + * to point to the next instruction. + * + * This must come AFTER anything that can throw an exception, or the + * exception catch may miss. (This also implies that it must come after + * EXPORT_PC.) 
+ */ +.macro FETCH_ADVANCE_INST count + ADVANCE \count + FETCH_INST +.endm + +/* + * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load + * rINST ahead of possible exception point. Be sure to manually advance rPC + * later. + */ +.macro PREFETCH_INST count + lhu rINST, ((\count) * 2)(rPC) +.endm + +/* + * Put the instruction's opcode field into the specified register. + */ +.macro GET_INST_OPCODE reg + and \reg, rINST, 255 +.endm + +/* + * Begin executing the opcode in _reg. + */ +.macro GOTO_OPCODE reg + .set noat + sll AT, \reg, 7 + daddu AT, rIBASE, AT + jic AT, 0 + .set at +.endm + +/* + * Get/set the 32-bit value from a Dalvik register. + * Note, GET_VREG does sign extension to 64 bits while + * GET_VREG_U does zero extension to 64 bits. + * One is useful for arithmetic while the other is + * useful for storing the result value as 64-bit. + */ +.macro GET_VREG reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lw \reg, 0(AT) + .set at +.endm +.macro GET_VREG_U reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lwu \reg, 0(AT) + .set at +.endm +.macro GET_VREG_FLOAT reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lwc1 \reg, 0(AT) + .set at +.endm +.macro SET_VREG reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + sw \reg, 0(AT) + dlsa AT, \vreg, rREFS, 2 + sw zero, 0(AT) + .set at +.endm +.macro SET_VREG_OBJECT reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + sw \reg, 0(AT) + dlsa AT, \vreg, rREFS, 2 + sw \reg, 0(AT) + .set at +.endm +.macro SET_VREG_FLOAT reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + swc1 \reg, 0(AT) + dlsa AT, \vreg, rREFS, 2 + sw zero, 0(AT) + .set at +.endm + +/* + * Get/set the 64-bit value from a Dalvik register. + * Avoid unaligned memory accesses. + * Note, SET_VREG_WIDE clobbers the register containing the value being stored. + * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number. 
+ */ +.macro GET_VREG_WIDE reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lw \reg, 0(AT) + lw AT, 4(AT) + dinsu \reg, AT, 32, 32 + .set at +.endm +.macro GET_VREG_DOUBLE reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + lwc1 \reg, 0(AT) + lw AT, 4(AT) + mthc1 AT, \reg + .set at +.endm +.macro SET_VREG_WIDE reg, vreg + .set noat + dlsa AT, \vreg, rFP, 2 + sw \reg, 0(AT) + drotr32 \reg, \reg, 0 + sw \reg, 4(AT) + dlsa AT, \vreg, rREFS, 2 + sw zero, 0(AT) + sw zero, 4(AT) + .set at +.endm +.macro SET_VREG_DOUBLE reg, vreg + .set noat + dlsa AT, \vreg, rREFS, 2 + sw zero, 0(AT) + sw zero, 4(AT) + dlsa AT, \vreg, rFP, 2 + swc1 \reg, 0(AT) + mfhc1 \vreg, \reg + sw \vreg, 4(AT) + .set at +.endm + +/* + * On-stack offsets for spilling/unspilling callee-saved registers + * and the frame size. + */ +#define STACK_OFFSET_RA 0 +#define STACK_OFFSET_GP 8 +#define STACK_OFFSET_S0 16 +#define STACK_OFFSET_S1 24 +#define STACK_OFFSET_S2 32 +#define STACK_OFFSET_S3 40 +#define STACK_OFFSET_S4 48 +#define STACK_OFFSET_S5 56 +#define STACK_SIZE 64 + +/* Constants for float/double_to_int/long conversions */ +#define INT_MIN 0x80000000 +#define INT_MIN_AS_FLOAT 0xCF000000 +#define INT_MIN_AS_DOUBLE 0xC1E0000000000000 +#define LONG_MIN 0x8000000000000000 +#define LONG_MIN_AS_FLOAT 0xDF000000 +#define LONG_MIN_AS_DOUBLE 0xC3E0000000000000 + +/* File: mips64/entry.S */ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* + * Interpreter entry point. + */ + + .set reorder + + .text + .global ExecuteMterpImpl + .type ExecuteMterpImpl, %function + .balign 16 +/* + * On entry: + * a0 Thread* self + * a1 code_item + * a2 ShadowFrame + * a3 JValue* result_register + * + */ +ExecuteMterpImpl: + .cfi_startproc + .cpsetup t9, t8, ExecuteMterpImpl + + .cfi_def_cfa sp, 0 + daddu sp, sp, -STACK_SIZE + .cfi_adjust_cfa_offset STACK_SIZE + + sd t8, STACK_OFFSET_GP(sp) + .cfi_rel_offset 28, STACK_OFFSET_GP + sd ra, STACK_OFFSET_RA(sp) + .cfi_rel_offset 31, STACK_OFFSET_RA + + sd s0, STACK_OFFSET_S0(sp) + .cfi_rel_offset 16, STACK_OFFSET_S0 + sd s1, STACK_OFFSET_S1(sp) + .cfi_rel_offset 17, STACK_OFFSET_S1 + sd s2, STACK_OFFSET_S2(sp) + .cfi_rel_offset 18, STACK_OFFSET_S2 + sd s3, STACK_OFFSET_S3(sp) + .cfi_rel_offset 19, STACK_OFFSET_S3 + sd s4, STACK_OFFSET_S4(sp) + .cfi_rel_offset 20, STACK_OFFSET_S4 + sd s5, STACK_OFFSET_S5(sp) + .cfi_rel_offset 21, STACK_OFFSET_S5 + + /* Remember the return register */ + sd a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2) + + /* Remember the code_item */ + sd a1, SHADOWFRAME_CODE_ITEM_OFFSET(a2) + + /* set up "named" registers */ + move rSELF, a0 + daddu rFP, a2, SHADOWFRAME_VREGS_OFFSET + lw v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2) + dlsa rREFS, v0, rFP, 2 + daddu rPC, a1, CODEITEM_INSNS_OFFSET + lw v0, SHADOWFRAME_DEX_PC_OFFSET(a2) + dlsa rPC, v0, rPC, 1 + EXPORT_PC + + /* Starting ibase */ + REFRESH_IBASE + + /* start executing the instruction at rPC */ + FETCH_INST + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + /* NOTE: no fallthrough */ + + + .global artMterpAsmInstructionStart + .type artMterpAsmInstructionStart, %function +artMterpAsmInstructionStart = .L_op_nop + .text + +/* ------------------------------ */ + .balign 128 +.L_op_nop: /* 0x00 */ +/* File: mips64/op_nop.S */ + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ 
*/ + .balign 128 +.L_op_move: /* 0x01 */ +/* File: mips64/op_move.S */ + /* for move, move-object, long-to-int */ + /* op vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vB + GET_INST_OPCODE v0 # extract opcode from rINST + .if 0 + SET_VREG_OBJECT a0, a2 # vA <- vB + .else + SET_VREG a0, a2 # vA <- vB + .endif + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_move_from16: /* 0x02 */ +/* File: mips64/op_move_from16.S */ + /* for: move/from16, move-object/from16 */ + /* op vAA, vBBBB */ + lhu a3, 2(rPC) # a3 <- BBBB + srl a2, rINST, 8 # a2 <- AA + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vBBBB + GET_INST_OPCODE v0 # extract opcode from rINST + .if 0 + SET_VREG_OBJECT a0, a2 # vAA <- vBBBB + .else + SET_VREG a0, a2 # vAA <- vBBBB + .endif + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_move_16: /* 0x03 */ +/* File: mips64/op_move_16.S */ + /* for: move/16, move-object/16 */ + /* op vAAAA, vBBBB */ + lhu a3, 4(rPC) # a3 <- BBBB + lhu a2, 2(rPC) # a2 <- AAAA + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vBBBB + GET_INST_OPCODE v0 # extract opcode from rINST + .if 0 + SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB + .else + SET_VREG a0, a2 # vAAAA <- vBBBB + .endif + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_move_wide: /* 0x04 */ +/* File: mips64/op_move_wide.S */ + /* move-wide vA, vB */ + /* NOTE: regs can overlap, e.g. 
"move v6,v7" or "move v7,v6" */ + ext a3, rINST, 12, 4 # a3 <- B + ext a2, rINST, 8, 4 # a2 <- A + GET_VREG_WIDE a0, a3 # a0 <- vB + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- vB + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_move_wide_from16: /* 0x05 */ +/* File: mips64/op_move_wide_from16.S */ + /* move-wide/from16 vAA, vBBBB */ + /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */ + lhu a3, 2(rPC) # a3 <- BBBB + srl a2, rINST, 8 # a2 <- AA + GET_VREG_WIDE a0, a3 # a0 <- vBBBB + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- vBBBB + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_move_wide_16: /* 0x06 */ +/* File: mips64/op_move_wide_16.S */ + /* move-wide/16 vAAAA, vBBBB */ + /* NOTE: regs can overlap, e.g. 
"move v6,v7" or "move v7,v6" */ + lhu a3, 4(rPC) # a3 <- BBBB + lhu a2, 2(rPC) # a2 <- AAAA + GET_VREG_WIDE a0, a3 # a0 <- vBBBB + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAAAA <- vBBBB + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_move_object: /* 0x07 */ +/* File: mips64/op_move_object.S */ +/* File: mips64/op_move.S */ + /* for move, move-object, long-to-int */ + /* op vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vB + GET_INST_OPCODE v0 # extract opcode from rINST + .if 1 + SET_VREG_OBJECT a0, a2 # vA <- vB + .else + SET_VREG a0, a2 # vA <- vB + .endif + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_move_object_from16: /* 0x08 */ +/* File: mips64/op_move_object_from16.S */ +/* File: mips64/op_move_from16.S */ + /* for: move/from16, move-object/from16 */ + /* op vAA, vBBBB */ + lhu a3, 2(rPC) # a3 <- BBBB + srl a2, rINST, 8 # a2 <- AA + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vBBBB + GET_INST_OPCODE v0 # extract opcode from rINST + .if 1 + SET_VREG_OBJECT a0, a2 # vAA <- vBBBB + .else + SET_VREG a0, a2 # vAA <- vBBBB + .endif + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_move_object_16: /* 0x09 */ +/* File: mips64/op_move_object_16.S */ +/* File: mips64/op_move_16.S */ + /* for: move/16, move-object/16 */ + /* op vAAAA, vBBBB */ + lhu a3, 4(rPC) # a3 <- BBBB + lhu a2, 2(rPC) # a2 <- AAAA + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vBBBB + GET_INST_OPCODE v0 # extract opcode from rINST + .if 1 + SET_VREG_OBJECT a0, a2 # vAAAA <- vBBBB + .else + SET_VREG a0, a2 # vAAAA <- vBBBB + .endif + GOTO_OPCODE v0 # jump to next instruction + + +/* 
------------------------------ */ + .balign 128 +.L_op_move_result: /* 0x0a */ +/* File: mips64/op_move_result.S */ + /* for: move-result, move-result-object */ + /* op vAA */ + srl a2, rINST, 8 # a2 <- AA + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType + lw a0, 0(a0) # a0 <- result.i + GET_INST_OPCODE v0 # extract opcode from rINST + .if 0 + SET_VREG_OBJECT a0, a2 # vAA <- result + .else + SET_VREG a0, a2 # vAA <- result + .endif + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_move_result_wide: /* 0x0b */ +/* File: mips64/op_move_result_wide.S */ + /* for: move-result-wide */ + /* op vAA */ + srl a2, rINST, 8 # a2 <- AA + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType + ld a0, 0(a0) # a0 <- result.j + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- result + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_move_result_object: /* 0x0c */ +/* File: mips64/op_move_result_object.S */ +/* File: mips64/op_move_result.S */ + /* for: move-result, move-result-object */ + /* op vAA */ + srl a2, rINST, 8 # a2 <- AA + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + ld a0, OFF_FP_RESULT_REGISTER(rFP) # get pointer to result JType + lw a0, 0(a0) # a0 <- result.i + GET_INST_OPCODE v0 # extract opcode from rINST + .if 1 + SET_VREG_OBJECT a0, a2 # vAA <- result + .else + SET_VREG a0, a2 # vAA <- result + .endif + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_move_exception: /* 0x0d */ +/* File: mips64/op_move_exception.S */ + /* move-exception vAA */ + srl a2, rINST, 8 # a2 <- AA + ld a0, THREAD_EXCEPTION_OFFSET(rSELF) # load exception obj + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + SET_VREG_OBJECT a0, a2 # vAA <- exception obj + 
GET_INST_OPCODE v0 # extract opcode from rINST + sd zero, THREAD_EXCEPTION_OFFSET(rSELF) # clear exception + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_return_void: /* 0x0e */ +/* File: mips64/op_return_void.S */ + .extern MterpThreadFenceForConstructor + .extern MterpSuspendCheck + jal MterpThreadFenceForConstructor + lw ra, THREAD_FLAGS_OFFSET(rSELF) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, 1f + jal MterpSuspendCheck # (self) +1: + li a0, 0 + b MterpReturn + +/* ------------------------------ */ + .balign 128 +.L_op_return: /* 0x0f */ +/* File: mips64/op_return.S */ + /* + * Return a 32-bit value. + * + * for: return, return-object + */ + /* op vAA */ + .extern MterpThreadFenceForConstructor + .extern MterpSuspendCheck + jal MterpThreadFenceForConstructor + lw ra, THREAD_FLAGS_OFFSET(rSELF) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, 1f + jal MterpSuspendCheck # (self) +1: + srl a2, rINST, 8 # a2 <- AA + GET_VREG_U a0, a2 # a0 <- vAA + b MterpReturn + +/* ------------------------------ */ + .balign 128 +.L_op_return_wide: /* 0x10 */ +/* File: mips64/op_return_wide.S */ + /* + * Return a 64-bit value. + */ + /* return-wide vAA */ + /* op vAA */ + .extern MterpThreadFenceForConstructor + .extern MterpSuspendCheck + jal MterpThreadFenceForConstructor + lw ra, THREAD_FLAGS_OFFSET(rSELF) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, 1f + jal MterpSuspendCheck # (self) +1: + srl a2, rINST, 8 # a2 <- AA + GET_VREG_WIDE a0, a2 # a0 <- vAA + b MterpReturn + +/* ------------------------------ */ + .balign 128 +.L_op_return_object: /* 0x11 */ +/* File: mips64/op_return_object.S */ +/* File: mips64/op_return.S */ + /* + * Return a 32-bit value. 
+ * + * for: return, return-object + */ + /* op vAA */ + .extern MterpThreadFenceForConstructor + .extern MterpSuspendCheck + jal MterpThreadFenceForConstructor + lw ra, THREAD_FLAGS_OFFSET(rSELF) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, 1f + jal MterpSuspendCheck # (self) +1: + srl a2, rINST, 8 # a2 <- AA + GET_VREG_U a0, a2 # a0 <- vAA + b MterpReturn + + +/* ------------------------------ */ + .balign 128 +.L_op_const_4: /* 0x12 */ +/* File: mips64/op_const_4.S */ + /* const/4 vA, #+B */ + ext a2, rINST, 8, 4 # a2 <- A + seh a0, rINST # sign extend B in rINST + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + sra a0, a0, 12 # shift B into its final position + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- +B + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const_16: /* 0x13 */ +/* File: mips64/op_const_16.S */ + /* const/16 vAA, #+BBBB */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- sign-extended BBBB + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- +BBBB + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const: /* 0x14 */ +/* File: mips64/op_const.S */ + /* const vAA, #+BBBBbbbb */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- bbbb (low) + lh a1, 4(rPC) # a1 <- BBBB (high) + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + ins a0, a1, 16, 16 # a0 = BBBBbbbb + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- +BBBBbbbb + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const_high16: /* 0x15 */ +/* File: mips64/op_const_high16.S */ + /* const/high16 vAA, #+BBBB0000 */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- BBBB + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + sll a0, a0, 16 # a0 <- BBBB0000 
+ GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- +BBBB0000 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const_wide_16: /* 0x16 */ +/* File: mips64/op_const_wide_16.S */ + /* const-wide/16 vAA, #+BBBB */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- sign-extended BBBB + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- +BBBB + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const_wide_32: /* 0x17 */ +/* File: mips64/op_const_wide_32.S */ + /* const-wide/32 vAA, #+BBBBbbbb */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- bbbb (low) + lh a1, 4(rPC) # a1 <- BBBB (high) + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + ins a0, a1, 16, 16 # a0 = BBBBbbbb + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- +BBBBbbbb + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const_wide: /* 0x18 */ +/* File: mips64/op_const_wide.S */ + /* const-wide vAA, #+HHHHhhhhBBBBbbbb */ + srl a4, rINST, 8 # a4 <- AA + lh a0, 2(rPC) # a0 <- bbbb (low) + lh a1, 4(rPC) # a1 <- BBBB (low middle) + lh a2, 6(rPC) # a2 <- hhhh (high middle) + lh a3, 8(rPC) # a3 <- HHHH (high) + FETCH_ADVANCE_INST 5 # advance rPC, load rINST + ins a0, a1, 16, 16 # a0 = BBBBbbbb + ins a2, a3, 16, 16 # a2 = HHHHhhhh + dinsu a0, a2, 32, 32 # a0 = HHHHhhhhBBBBbbbb + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- +HHHHhhhhBBBBbbbb + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const_wide_high16: /* 0x19 */ +/* File: mips64/op_const_wide_high16.S */ + /* const-wide/high16 vAA, #+BBBB000000000000 */ + srl a2, rINST, 8 # a2 <- AA + lh a0, 2(rPC) # a0 <- BBBB + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + dsll32 a0, a0, 
16 # a0 <- BBBB000000000000 + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vAA <- +BBBB000000000000 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const_string: /* 0x1a */ +/* File: mips64/op_const_string.S */ + /* const/string vAA, String//BBBB */ + .extern MterpConstString + EXPORT_PC + lhu a0, 2(rPC) # a0 <- BBBB + srl a1, rINST, 8 # a1 <- AA + daddu a2, rFP, OFF_FP_SHADOWFRAME + move a3, rSELF + jal MterpConstString # (index, tgt_reg, shadow_frame, self) + PREFETCH_INST 2 # load rINST + bnez v0, MterpPossibleException # let reference interpreter deal with it. + ADVANCE 2 # advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const_string_jumbo: /* 0x1b */ +/* File: mips64/op_const_string_jumbo.S */ + /* const/string vAA, String//BBBBBBBB */ + .extern MterpConstString + EXPORT_PC + lh a0, 2(rPC) # a0 <- bbbb (low) + lh a4, 4(rPC) # a4 <- BBBB (high) + srl a1, rINST, 8 # a1 <- AA + ins a0, a4, 16, 16 # a0 <- BBBBbbbb + daddu a2, rFP, OFF_FP_SHADOWFRAME + move a3, rSELF + jal MterpConstString # (index, tgt_reg, shadow_frame, self) + PREFETCH_INST 3 # load rINST + bnez v0, MterpPossibleException # let reference interpreter deal with it. + ADVANCE 3 # advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_const_class: /* 0x1c */ +/* File: mips64/op_const_class.S */ + /* const/class vAA, Class//BBBB */ + .extern MterpConstClass + EXPORT_PC + lhu a0, 2(rPC) # a0 <- BBBB + srl a1, rINST, 8 # a1 <- AA + daddu a2, rFP, OFF_FP_SHADOWFRAME + move a3, rSELF + jal MterpConstClass # (index, tgt_reg, shadow_frame, self) + PREFETCH_INST 2 # load rINST + bnez v0, MterpPossibleException # let reference interpreter deal with it. 
+ ADVANCE 2 # advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_monitor_enter: /* 0x1d */ +/* File: mips64/op_monitor_enter.S */ + /* + * Synchronize on an object. + */ + /* monitor-enter vAA */ + .extern artLockObjectFromCode + EXPORT_PC + srl a2, rINST, 8 # a2 <- AA + GET_VREG_U a0, a2 # a0 <- vAA (object) + move a1, rSELF # a1 <- self + jal artLockObjectFromCode + bnezc v0, MterpException + FETCH_ADVANCE_INST 1 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_monitor_exit: /* 0x1e */ +/* File: mips64/op_monitor_exit.S */ + /* + * Unlock an object. + * + * Exceptions that occur when unlocking a monitor need to appear as + * if they happened at the following instruction. See the Dalvik + * instruction spec. + */ + /* monitor-exit vAA */ + .extern artUnlockObjectFromCode + EXPORT_PC + srl a2, rINST, 8 # a2 <- AA + GET_VREG_U a0, a2 # a0 <- vAA (object) + move a1, rSELF # a1 <- self + jal artUnlockObjectFromCode # v0 <- success for unlock(self, obj) + bnezc v0, MterpException + FETCH_ADVANCE_INST 1 # before throw: advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_check_cast: /* 0x1f */ +/* File: mips64/op_check_cast.S */ + /* + * Check to see if a cast from one class to another is allowed. 
+ */ + /* check-cast vAA, class//BBBB */ + .extern MterpCheckCast + EXPORT_PC + lhu a0, 2(rPC) # a0 <- BBBB + srl a1, rINST, 8 # a1 <- AA + dlsa a1, a1, rFP, 2 # a1 <- &object + ld a2, OFF_FP_METHOD(rFP) # a2 <- method + move a3, rSELF # a3 <- self + jal MterpCheckCast # (index, &obj, method, self) + PREFETCH_INST 2 + bnez v0, MterpPossibleException + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_instance_of: /* 0x20 */ +/* File: mips64/op_instance_of.S */ + /* + * Check to see if an object reference is an instance of a class. + * + * Most common situation is a non-null object, being compared against + * an already-resolved class. + */ + /* instance-of vA, vB, class//CCCC */ + .extern MterpInstanceOf + EXPORT_PC + lhu a0, 2(rPC) # a0 <- CCCC + srl a1, rINST, 12 # a1 <- B + dlsa a1, a1, rFP, 2 # a1 <- &object + ld a2, OFF_FP_METHOD(rFP) # a2 <- method + move a3, rSELF # a3 <- self + jal MterpInstanceOf # (index, &obj, method, self) + ld a1, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a1, MterpException + ADVANCE 2 # advance rPC + SET_VREG v0, a2 # vA <- v0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_array_length: /* 0x21 */ +/* File: mips64/op_array_length.S */ + /* + * Return the length of an array. 
+ */ + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a0, a1 # a0 <- vB (object ref) + ext a2, rINST, 8, 4 # a2 <- A + beqz a0, common_errNullObject # yup, fail + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- array length + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a3, a2 # vB <- length + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_new_instance: /* 0x22 */ +/* File: mips64/op_new_instance.S */ + /* + * Create a new instance of a class. + */ + /* new-instance vAA, class//BBBB */ + .extern MterpNewInstance + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rSELF + move a2, rINST + jal MterpNewInstance # (shadow_frame, self, inst_data) + beqzc v0, MterpPossibleException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_new_array: /* 0x23 */ +/* File: mips64/op_new_array.S */ + /* + * Allocate an array of objects, specified with the array class + * and a count. + * + * The verifier guarantees that this is an array class, so we don't + * check for it here. + */ + /* new-array vA, vB, class//CCCC */ + .extern MterpNewArray + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + move a3, rSELF + jal MterpNewArray + beqzc v0, MterpPossibleException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_filled_new_array: /* 0x24 */ +/* File: mips64/op_filled_new_array.S */ + /* + * Create a new array with elements filled from registers. 
+ * + * for: filled-new-array, filled-new-array/range + */ + /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */ + .extern MterpFilledNewArray + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rSELF + jal MterpFilledNewArray + beqzc v0, MterpPossibleException + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_filled_new_array_range: /* 0x25 */ +/* File: mips64/op_filled_new_array_range.S */ +/* File: mips64/op_filled_new_array.S */ + /* + * Create a new array with elements filled from registers. + * + * for: filled-new-array, filled-new-array/range + */ + /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */ + .extern MterpFilledNewArrayRange + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rSELF + jal MterpFilledNewArrayRange + beqzc v0, MterpPossibleException + FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_fill_array_data: /* 0x26 */ +/* File: mips64/op_fill_array_data.S */ + /* fill-array-data vAA, +BBBBBBBB */ + .extern MterpFillArrayData + EXPORT_PC + lh a1, 2(rPC) # a1 <- bbbb (lo) + lh a0, 4(rPC) # a0 <- BBBB (hi) + srl a3, rINST, 8 # a3 <- AA + ins a1, a0, 16, 16 # a1 <- BBBBbbbb + GET_VREG_U a0, a3 # a0 <- vAA (array object) + dlsa a1, a1, rPC, 1 # a1 <- PC + BBBBbbbb*2 (array data off.) + jal MterpFillArrayData # (obj, payload) + beqzc v0, MterpPossibleException # exception? 
+ FETCH_ADVANCE_INST 3 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_throw: /* 0x27 */ +/* File: mips64/op_throw.S */ + /* + * Throw an exception object in the current thread. + */ + /* throw vAA */ + EXPORT_PC + srl a2, rINST, 8 # a2 <- AA + GET_VREG_U a0, a2 # a0 <- vAA (exception object) + beqzc a0, common_errNullObject + sd a0, THREAD_EXCEPTION_OFFSET(rSELF) # thread->exception <- obj + b MterpException + +/* ------------------------------ */ + .balign 128 +.L_op_goto: /* 0x28 */ +/* File: mips64/op_goto.S */ + /* + * Unconditional branch, 8-bit offset. + * + * The branch distance is a signed code-unit offset, which we need to + * double to get a byte offset. + */ + /* goto +AA */ + .extern MterpProfileBranch + srl rINST, rINST, 8 + seb rINST, rINST # rINST <- offset (sign-extended AA) +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_goto_16: /* 0x29 */ +/* File: mips64/op_goto_16.S */ + /* + * Unconditional branch, 16-bit offset. + * + * The branch distance is a signed code-unit offset, which we need to + * double to get a byte offset. 
+ */ + /* goto/16 +AAAA */ + .extern MterpProfileBranch + lh rINST, 2(rPC) # rINST <- offset (sign-extended AAAA) +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_goto_32: /* 0x2a */ +/* File: mips64/op_goto_32.S */ + /* + * Unconditional branch, 32-bit offset. + * + * The branch distance is a signed code-unit offset, which we need to + * double to get a byte offset. + * + * Unlike most opcodes, this one is allowed to branch to itself, so + * our "backward branch" test must be "<=0" instead of "<0". 
+ */ + /* goto/32 +AAAAAAAA */ + .extern MterpProfileBranch + lh rINST, 2(rPC) # rINST <- aaaa (low) + lh a1, 4(rPC) # a1 <- AAAA (high) + ins rINST, a1, 16, 16 # rINST <- offset (sign-extended AAAAaaaa) +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + blez a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_packed_switch: /* 0x2b */ +/* File: mips64/op_packed_switch.S */ + /* + * Handle a packed-switch or sparse-switch instruction. In both cases + * we decode it and hand it off to a helper function. + * + * We don't really expect backward branches in a switch statement, but + * they're perfectly legal, so we check for them here. 
+ * + * for: packed-switch, sparse-switch + */ + /* op vAA, +BBBBBBBB */ + .extern MterpDoPackedSwitch + .extern MterpProfileBranch + lh a0, 2(rPC) # a0 <- bbbb (lo) + lh a1, 4(rPC) # a1 <- BBBB (hi) + srl a3, rINST, 8 # a3 <- AA + ins a0, a1, 16, 16 # a0 <- BBBBbbbb + GET_VREG a1, a3 # a1 <- vAA + dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2 + jal MterpDoPackedSwitch # v0 <- code-unit branch offset + move rINST, v0 +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + blez a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_sparse_switch: /* 0x2c */ +/* File: mips64/op_sparse_switch.S */ +/* File: mips64/op_packed_switch.S */ + /* + * Handle a packed-switch or sparse-switch instruction. In both cases + * we decode it and hand it off to a helper function. + * + * We don't really expect backward branches in a switch statement, but + * they're perfectly legal, so we check for them here. 
+ * + * for: packed-switch, sparse-switch + */ + /* op vAA, +BBBBBBBB */ + .extern MterpDoSparseSwitch + .extern MterpProfileBranch + lh a0, 2(rPC) # a0 <- bbbb (lo) + lh a1, 4(rPC) # a1 <- BBBB (hi) + srl a3, rINST, 8 # a3 <- AA + ins a0, a1, 16, 16 # a0 <- BBBBbbbb + GET_VREG a1, a3 # a1 <- vAA + dlsa a0, a0, rPC, 1 # a0 <- PC + BBBBbbbb*2 + jal MterpDoSparseSwitch # v0 <- code-unit branch offset + move rINST, v0 +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + blez a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_cmpl_float: /* 0x2d */ +/* File: mips64/op_cmpl_float.S */ +/* File: mips64/fcmp.S */ + /* + * Compare two floating-point values. Puts 0, 1, or -1 into the + * destination register based on the results of the comparison. 
+ * + * For: cmpl-float, cmpg-float + */ + /* op vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f0, a2 # f0 <- vBB + GET_VREG_FLOAT f1, a3 # f1 <- vCC + cmp.eq.s f2, f0, f1 + li a0, 0 + bc1nez f2, 1f # done if vBB == vCC (ordered) + .if 0 + cmp.lt.s f2, f0, f1 + li a0, -1 + bc1nez f2, 1f # done if vBB < vCC (ordered) + li a0, 1 # vBB > vCC or unordered + .else + cmp.lt.s f2, f1, f0 + li a0, 1 + bc1nez f2, 1f # done if vBB > vCC (ordered) + li a0, -1 # vBB < vCC or unordered + .endif +1: + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_cmpg_float: /* 0x2e */ +/* File: mips64/op_cmpg_float.S */ +/* File: mips64/fcmp.S */ + /* + * Compare two floating-point values. Puts 0, 1, or -1 into the + * destination register based on the results of the comparison. + * + * For: cmpl-float, cmpg-float + */ + /* op vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f0, a2 # f0 <- vBB + GET_VREG_FLOAT f1, a3 # f1 <- vCC + cmp.eq.s f2, f0, f1 + li a0, 0 + bc1nez f2, 1f # done if vBB == vCC (ordered) + .if 1 + cmp.lt.s f2, f0, f1 + li a0, -1 + bc1nez f2, 1f # done if vBB < vCC (ordered) + li a0, 1 # vBB > vCC or unordered + .else + cmp.lt.s f2, f1, f0 + li a0, 1 + bc1nez f2, 1f # done if vBB > vCC (ordered) + li a0, -1 # vBB < vCC or unordered + .endif +1: + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_cmpl_double: /* 0x2f */ +/* File: mips64/op_cmpl_double.S */ +/* File: mips64/fcmpWide.S */ + /* + * Compare two floating-point values. 
Puts 0, 1, or -1 into the + * destination register based on the results of the comparison. + * + * For: cmpl-double, cmpg-double + */ + /* op vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f0, a2 # f0 <- vBB + GET_VREG_DOUBLE f1, a3 # f1 <- vCC + cmp.eq.d f2, f0, f1 + li a0, 0 + bc1nez f2, 1f # done if vBB == vCC (ordered) + .if 0 + cmp.lt.d f2, f0, f1 + li a0, -1 + bc1nez f2, 1f # done if vBB < vCC (ordered) + li a0, 1 # vBB > vCC or unordered + .else + cmp.lt.d f2, f1, f0 + li a0, 1 + bc1nez f2, 1f # done if vBB > vCC (ordered) + li a0, -1 # vBB < vCC or unordered + .endif +1: + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_cmpg_double: /* 0x30 */ +/* File: mips64/op_cmpg_double.S */ +/* File: mips64/fcmpWide.S */ + /* + * Compare two floating-point values. Puts 0, 1, or -1 into the + * destination register based on the results of the comparison. 
+ * + * For: cmpl-double, cmpg-double + */ + /* op vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f0, a2 # f0 <- vBB + GET_VREG_DOUBLE f1, a3 # f1 <- vCC + cmp.eq.d f2, f0, f1 + li a0, 0 + bc1nez f2, 1f # done if vBB == vCC (ordered) + .if 1 + cmp.lt.d f2, f0, f1 + li a0, -1 + bc1nez f2, 1f # done if vBB < vCC (ordered) + li a0, 1 # vBB > vCC or unordered + .else + cmp.lt.d f2, f1, f0 + li a0, 1 + bc1nez f2, 1f # done if vBB > vCC (ordered) + li a0, -1 # vBB < vCC or unordered + .endif +1: + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_cmp_long: /* 0x31 */ +/* File: mips64/op_cmp_long.S */ + /* cmp-long vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + slt a2, a0, a1 + slt a0, a1, a0 + subu a0, a0, a2 + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- result + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_if_eq: /* 0x32 */ +/* File: mips64/op_if_eq.S */ +/* File: mips64/bincmp.S */ + /* + * Generic two-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-le" you would use "le". 
+ * + * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le + */ + /* if-cmp vA, vB, +CCCC */ + .extern MterpProfileBranch + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + beqc a0, a1, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_ne: /* 0x33 */ +/* File: mips64/op_if_ne.S */ +/* File: mips64/bincmp.S */ + /* + * Generic two-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-le" you would use "le". 
+ * + * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le + */ + /* if-cmp vA, vB, +CCCC */ + .extern MterpProfileBranch + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + bnec a0, a1, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_lt: /* 0x34 */ +/* File: mips64/op_if_lt.S */ +/* File: mips64/bincmp.S */ + /* + * Generic two-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-le" you would use "le". 
+ * + * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le + */ + /* if-cmp vA, vB, +CCCC */ + .extern MterpProfileBranch + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + bltc a0, a1, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_ge: /* 0x35 */ +/* File: mips64/op_if_ge.S */ +/* File: mips64/bincmp.S */ + /* + * Generic two-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-le" you would use "le". 
+ * + * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le + */ + /* if-cmp vA, vB, +CCCC */ + .extern MterpProfileBranch + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + bgec a0, a1, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_gt: /* 0x36 */ +/* File: mips64/op_if_gt.S */ +/* File: mips64/bincmp.S */ + /* + * Generic two-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-le" you would use "le". 
+ * + * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le + */ + /* if-cmp vA, vB, +CCCC */ + .extern MterpProfileBranch + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + bgtc a0, a1, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_le: /* 0x37 */ +/* File: mips64/op_if_le.S */ +/* File: mips64/bincmp.S */ + /* + * Generic two-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-le" you would use "le". 
+ * + * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le + */ + /* if-cmp vA, vB, +CCCC */ + .extern MterpProfileBranch + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + lh rINST, 2(rPC) # rINST <- offset (sign-extended CCCC) + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + blec a0, a1, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_eqz: /* 0x38 */ +/* File: mips64/op_if_eqz.S */ +/* File: mips64/zcmp.S */ + /* + * Generic one-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-lez" you would use "le". 
+ * + * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez + */ + /* if-cmp vAA, +BBBB */ + .extern MterpProfileBranch + srl a2, rINST, 8 # a2 <- AA + lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB) + GET_VREG a0, a2 # a0 <- vAA + beqzc a0, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_nez: /* 0x39 */ +/* File: mips64/op_if_nez.S */ +/* File: mips64/zcmp.S */ + /* + * Generic one-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-lez" you would use "le". 
+ * + * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez + */ + /* if-cmp vAA, +BBBB */ + .extern MterpProfileBranch + srl a2, rINST, 8 # a2 <- AA + lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB) + GET_VREG a0, a2 # a0 <- vAA + bnezc a0, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_ltz: /* 0x3a */ +/* File: mips64/op_if_ltz.S */ +/* File: mips64/zcmp.S */ + /* + * Generic one-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-lez" you would use "le". 
+ * + * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez + */ + /* if-cmp vAA, +BBBB */ + .extern MterpProfileBranch + srl a2, rINST, 8 # a2 <- AA + lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB) + GET_VREG a0, a2 # a0 <- vAA + bltzc a0, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_gez: /* 0x3b */ +/* File: mips64/op_if_gez.S */ +/* File: mips64/zcmp.S */ + /* + * Generic one-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-lez" you would use "le". 
+ * + * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez + */ + /* if-cmp vAA, +BBBB */ + .extern MterpProfileBranch + srl a2, rINST, 8 # a2 <- AA + lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB) + GET_VREG a0, a2 # a0 <- vAA + bgezc a0, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_gtz: /* 0x3c */ +/* File: mips64/op_if_gtz.S */ +/* File: mips64/zcmp.S */ + /* + * Generic one-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-lez" you would use "le". 
+ * + * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez + */ + /* if-cmp vAA, +BBBB */ + .extern MterpProfileBranch + srl a2, rINST, 8 # a2 <- AA + lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB) + GET_VREG a0, a2 # a0 <- vAA + bgtzc a0, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_if_lez: /* 0x3d */ +/* File: mips64/op_if_lez.S */ +/* File: mips64/zcmp.S */ + /* + * Generic one-operand compare-and-branch operation. Provide a "condition" + * fragment that specifies the comparison to perform, e.g. for + * "if-lez" you would use "le". 
+ * + * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez + */ + /* if-cmp vAA, +BBBB */ + .extern MterpProfileBranch + srl a2, rINST, 8 # a2 <- AA + lh rINST, 2(rPC) # rINST <- offset (sign-extended BBBB) + GET_VREG a0, a2 # a0 <- vAA + blezc a0, 1f + li rINST, 2 # offset if branch not taken +1: +#if MTERP_PROFILE_BRANCHES + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST + jal MterpProfileBranch # (self, shadow_frame, offset) + bnezc v0, MterpOnStackReplacement # Note: offset must be in rINST +#endif + dlsa rPC, rINST, rPC, 1 # rPC <- rPC + offset * 2 + lw ra, THREAD_FLAGS_OFFSET(rSELF) # Preload flags for MterpCheckSuspendAndContinue + move a0, rINST # a0 <- offset + FETCH_INST # load rINST + bltz a0, MterpCheckSuspendAndContinue # suspend check if backwards branch + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_3e: /* 0x3e */ +/* File: mips64/op_unused_3e.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_3f: /* 0x3f */ +/* File: mips64/op_unused_3f.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_40: /* 0x40 */ +/* File: mips64/op_unused_40.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_41: /* 0x41 */ +/* File: mips64/op_unused_41.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_42: /* 0x42 */ +/* File: mips64/op_unused_42.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. 
+ */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_43: /* 0x43 */ +/* File: mips64/op_unused_43.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_aget: /* 0x44 */ +/* File: mips64/op_aget.S */ + /* + * Array get, 32 bits or less. vAA <- vBB[vCC]. + * + * for: aget, aget-boolean, aget-byte, aget-char, aget-short + * + * NOTE: assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 2 + # [d]lsa does not support shift count of 0. + dlsa a0, a1, a0, 2 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + lw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC] + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a2, a4 # vAA <- a2 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_aget_wide: /* 0x45 */ +/* File: mips64/op_aget_wide.S */ + /* + * Array get, 64 bits. vAA <- vBB[vCC]. 
+ * + */ + /* aget-wide vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + lw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0) + lw a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) + dinsu a2, a3, 32, 32 # a2 <- vBB[vCC] + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a2, a4 # vAA <- a2 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_aget_object: /* 0x46 */ +/* File: mips64/op_aget_object.S */ + /* + * Array object get. vAA <- vBB[vCC]. + * + * for: aget-object + */ + /* op vAA, vBB, vCC */ + .extern artAGetObjectFromMterp + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + EXPORT_PC + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + jal artAGetObjectFromMterp # (array, index) + ld a1, THREAD_EXCEPTION_OFFSET(rSELF) + srl a4, rINST, 8 # a4 <- AA + PREFETCH_INST 2 + bnez a1, MterpException + SET_VREG_OBJECT v0, a4 # vAA <- v0 + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_aget_boolean: /* 0x47 */ +/* File: mips64/op_aget_boolean.S */ +/* File: mips64/op_aget.S */ + /* + * Array get, 32 bits or less. vAA <- vBB[vCC]. + * + * for: aget, aget-boolean, aget-byte, aget-char, aget-short + * + * NOTE: assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. 
+ */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 0 + # [d]lsa does not support shift count of 0. + dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + lbu a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC] + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a2, a4 # vAA <- a2 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_aget_byte: /* 0x48 */ +/* File: mips64/op_aget_byte.S */ +/* File: mips64/op_aget.S */ + /* + * Array get, 32 bits or less. vAA <- vBB[vCC]. + * + * for: aget, aget-boolean, aget-byte, aget-char, aget-short + * + * NOTE: assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 0 + # [d]lsa does not support shift count of 0. 
+ dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + lb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC] + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a2, a4 # vAA <- a2 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_aget_char: /* 0x49 */ +/* File: mips64/op_aget_char.S */ +/* File: mips64/op_aget.S */ + /* + * Array get, 32 bits or less. vAA <- vBB[vCC]. + * + * for: aget, aget-boolean, aget-byte, aget-char, aget-short + * + * NOTE: assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 1 + # [d]lsa does not support shift count of 0. + dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + lhu a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC] + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a2, a4 # vAA <- a2 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_aget_short: /* 0x4a */ +/* File: mips64/op_aget_short.S */ +/* File: mips64/op_aget.S */ + /* + * Array get, 32 bits or less. vAA <- vBB[vCC]. + * + * for: aget, aget-boolean, aget-byte, aget-char, aget-short + * + * NOTE: assumes data offset for arrays is the same for all non-wide types. 
+ * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 1 + # [d]lsa does not support shift count of 0. + dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + lh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # a2 <- vBB[vCC] + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a2, a4 # vAA <- a2 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_aput: /* 0x4b */ +/* File: mips64/op_aput.S */ + /* + * Array put, 32 bits or less. vBB[vCC] <- vAA. + * + * for: aput, aput-boolean, aput-byte, aput-char, aput-short + * + * NOTE: this assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 2 + # [d]lsa does not support shift count of 0. 
+ dlsa a0, a1, a0, 2 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_VREG a2, a4 # a2 <- vAA + GET_INST_OPCODE v0 # extract opcode from rINST + sw a2, MIRROR_INT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_aput_wide: /* 0x4c */ +/* File: mips64/op_aput_wide.S */ + /* + * Array put, 64 bits. vBB[vCC] <- vAA. + * + */ + /* aput-wide vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + dlsa a0, a1, a0, 3 # a0 <- arrayObj + index*width + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + GET_VREG_WIDE a2, a4 # a2 <- vAA + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + sw a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0) + dsrl32 a2, a2, 0 + sw a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0) # vBB[vCC] <- a2 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_aput_object: /* 0x4d */ +/* File: mips64/op_aput_object.S */ + /* + * Store an object into an array. vBB[vCC] <- vAA. 
+ */ + /* op vAA, vBB, vCC */ + .extern MterpAputObject + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + jal MterpAputObject + beqzc v0, MterpPossibleException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_aput_boolean: /* 0x4e */ +/* File: mips64/op_aput_boolean.S */ +/* File: mips64/op_aput.S */ + /* + * Array put, 32 bits or less. vBB[vCC] <- vAA. + * + * for: aput, aput-boolean, aput-byte, aput-char, aput-short + * + * NOTE: this assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 0 + # [d]lsa does not support shift count of 0. + dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_VREG a2, a4 # a2 <- vAA + GET_INST_OPCODE v0 # extract opcode from rINST + sb a2, MIRROR_BOOLEAN_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_aput_byte: /* 0x4f */ +/* File: mips64/op_aput_byte.S */ +/* File: mips64/op_aput.S */ + /* + * Array put, 32 bits or less. vBB[vCC] <- vAA. + * + * for: aput, aput-boolean, aput-byte, aput-char, aput-short + * + * NOTE: this assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. 
+ */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 0 + # [d]lsa does not support shift count of 0. + dlsa a0, a1, a0, 0 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_VREG a2, a4 # a2 <- vAA + GET_INST_OPCODE v0 # extract opcode from rINST + sb a2, MIRROR_BYTE_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_aput_char: /* 0x50 */ +/* File: mips64/op_aput_char.S */ +/* File: mips64/op_aput.S */ + /* + * Array put, 32 bits or less. vBB[vCC] <- vAA. + * + * for: aput, aput-boolean, aput-byte, aput-char, aput-short + * + * NOTE: this assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 1 + # [d]lsa does not support shift count of 0. 
+ dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_VREG a2, a4 # a2 <- vAA + GET_INST_OPCODE v0 # extract opcode from rINST + sh a2, MIRROR_CHAR_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_aput_short: /* 0x51 */ +/* File: mips64/op_aput_short.S */ +/* File: mips64/op_aput.S */ + /* + * Array put, 32 bits or less. vBB[vCC] <- vAA. + * + * for: aput, aput-boolean, aput-byte, aput-char, aput-short + * + * NOTE: this assumes data offset for arrays is the same for all non-wide types. + * If this changes, specialize. + */ + /* op vAA, vBB, vCC */ + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + srl a4, rINST, 8 # a4 <- AA + GET_VREG_U a0, a2 # a0 <- vBB (array object) + GET_VREG a1, a3 # a1 <- vCC (requested index) + beqz a0, common_errNullObject # bail if null array object + lw a3, MIRROR_ARRAY_LENGTH_OFFSET(a0) # a3 <- arrayObj->length + .if 1 + # [d]lsa does not support shift count of 0. + dlsa a0, a1, a0, 1 # a0 <- arrayObj + index*width + .else + daddu a0, a1, a0 # a0 <- arrayObj + index*width + .endif + bgeu a1, a3, common_errArrayIndex # unsigned compare: index >= length, bail + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_VREG a2, a4 # a2 <- vAA + GET_INST_OPCODE v0 # extract opcode from rINST + sh a2, MIRROR_SHORT_ARRAY_DATA_OFFSET(a0) # vBB[vCC] <- a2 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iget: /* 0x52 */ +/* File: mips64/op_iget.S */ + /* + * General instance field get. 
+ * + * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short + */ + .extern artGet32InstanceFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer + move a3, rSELF # a3 <- self + jal artGet32InstanceFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[A] <- v0 + .else + SET_VREG v0, a2 # fp[A] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iget_wide: /* 0x53 */ +/* File: mips64/op_iget_wide.S */ + /* + * 64-bit instance field get. + * + * for: iget-wide + */ + .extern artGet64InstanceFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer + move a3, rSELF # a3 <- self + jal artGet64InstanceFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + SET_VREG_WIDE v0, a2 # fp[A] <- v0 + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iget_object: /* 0x54 */ +/* File: mips64/op_iget_object.S */ +/* File: mips64/op_iget.S */ + /* + * General instance field get. 
+ * + * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short + */ + .extern artGetObjInstanceFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer + move a3, rSELF # a3 <- self + jal artGetObjInstanceFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + .if 1 + SET_VREG_OBJECT v0, a2 # fp[A] <- v0 + .else + SET_VREG v0, a2 # fp[A] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iget_boolean: /* 0x55 */ +/* File: mips64/op_iget_boolean.S */ +/* File: mips64/op_iget.S */ + /* + * General instance field get. + * + * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short + */ + .extern artGetBooleanInstanceFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer + move a3, rSELF # a3 <- self + jal artGetBooleanInstanceFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[A] <- v0 + .else + SET_VREG v0, a2 # fp[A] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iget_byte: /* 0x56 */ +/* File: mips64/op_iget_byte.S */ +/* File: mips64/op_iget.S */ + /* + * General instance field get. 
+ * + * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short + */ + .extern artGetByteInstanceFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer + move a3, rSELF # a3 <- self + jal artGetByteInstanceFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[A] <- v0 + .else + SET_VREG v0, a2 # fp[A] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iget_char: /* 0x57 */ +/* File: mips64/op_iget_char.S */ +/* File: mips64/op_iget.S */ + /* + * General instance field get. + * + * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short + */ + .extern artGetCharInstanceFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer + move a3, rSELF # a3 <- self + jal artGetCharInstanceFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[A] <- v0 + .else + SET_VREG v0, a2 # fp[A] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iget_short: /* 0x58 */ +/* File: mips64/op_iget_short.S */ +/* File: mips64/op_iget.S */ + /* + * General instance field get. 
+ * + * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short + */ + .extern artGetShortInstanceFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ld a2, OFF_FP_METHOD(rFP) # a2 <- referrer + move a3, rSELF # a3 <- self + jal artGetShortInstanceFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[A] <- v0 + .else + SET_VREG v0, a2 # fp[A] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iput: /* 0x59 */ +/* File: mips64/op_iput.S */ + /* + * General 32-bit instance field put. + * + * for: iput, iput-boolean, iput-byte, iput-char, iput-short + */ + /* op vA, vB, field//CCCC */ + .extern artSet32InstanceFromMterp + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + GET_VREG a2, a2 # a2 <- fp[A] + ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer + PREFETCH_INST 2 + jal artSet32InstanceFromMterp + bnez v0, MterpPossibleException # bail out + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iput_wide: /* 0x5a */ +/* File: mips64/op_iput_wide.S */ + /* iput-wide vA, vB, field//CCCC */ + .extern artSet64InstanceFromMterp + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + dlsa a2, a2, rFP, 2 # a2 <- &fp[A] + ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer + PREFETCH_INST 2 + jal artSet64InstanceFromMterp + bnez v0, MterpPossibleException # bail out + ADVANCE 2 + 
GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iput_object: /* 0x5b */ +/* File: mips64/op_iput_object.S */ + .extern MterpIputObject + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + move a3, rSELF + jal MterpIputObject + beqzc v0, MterpException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iput_boolean: /* 0x5c */ +/* File: mips64/op_iput_boolean.S */ +/* File: mips64/op_iput.S */ + /* + * General 32-bit instance field put. + * + * for: iput, iput-boolean, iput-byte, iput-char, iput-short + */ + /* op vA, vB, field//CCCC */ + .extern artSet8InstanceFromMterp + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + GET_VREG a2, a2 # a2 <- fp[A] + ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer + PREFETCH_INST 2 + jal artSet8InstanceFromMterp + bnez v0, MterpPossibleException # bail out + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iput_byte: /* 0x5d */ +/* File: mips64/op_iput_byte.S */ +/* File: mips64/op_iput.S */ + /* + * General 32-bit instance field put. 
+ * + * for: iput, iput-boolean, iput-byte, iput-char, iput-short + */ + /* op vA, vB, field//CCCC */ + .extern artSet8InstanceFromMterp + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + GET_VREG a2, a2 # a2 <- fp[A] + ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer + PREFETCH_INST 2 + jal artSet8InstanceFromMterp + bnez v0, MterpPossibleException # bail out + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iput_char: /* 0x5e */ +/* File: mips64/op_iput_char.S */ +/* File: mips64/op_iput.S */ + /* + * General 32-bit instance field put. + * + * for: iput, iput-boolean, iput-byte, iput-char, iput-short + */ + /* op vA, vB, field//CCCC */ + .extern artSet16InstanceFromMterp + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + GET_VREG a2, a2 # a2 <- fp[A] + ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer + PREFETCH_INST 2 + jal artSet16InstanceFromMterp + bnez v0, MterpPossibleException # bail out + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iput_short: /* 0x5f */ +/* File: mips64/op_iput_short.S */ +/* File: mips64/op_iput.S */ + /* + * General 32-bit instance field put. 
+ * + * for: iput, iput-boolean, iput-byte, iput-char, iput-short + */ + /* op vA, vB, field//CCCC */ + .extern artSet16InstanceFromMterp + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref CCCC + srl a1, rINST, 12 # a1 <- B + GET_VREG_U a1, a1 # a1 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + GET_VREG a2, a2 # a2 <- fp[A] + ld a3, OFF_FP_METHOD(rFP) # a3 <- referrer + PREFETCH_INST 2 + jal artSet16InstanceFromMterp + bnez v0, MterpPossibleException # bail out + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sget: /* 0x60 */ +/* File: mips64/op_sget.S */ + /* + * General SGET handler wrapper. + * + * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short + */ + /* op vAA, field//BBBB */ + .extern artGet32StaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + move a2, rSELF + jal artGet32StaticFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + srl a2, rINST, 8 # a2 <- AA + + PREFETCH_INST 2 + bnez a3, MterpException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[AA] <- v0 + .else + SET_VREG v0, a2 # fp[AA] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 + +/* ------------------------------ */ + .balign 128 +.L_op_sget_wide: /* 0x61 */ +/* File: mips64/op_sget_wide.S */ + /* + * SGET_WIDE handler wrapper. 
+ * + */ + /* sget-wide vAA, field//BBBB */ + .extern artGet64StaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + move a2, rSELF + jal artGet64StaticFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + srl a4, rINST, 8 # a4 <- AA + bnez a3, MterpException # bail out + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG_WIDE v0, a4 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_sget_object: /* 0x62 */ +/* File: mips64/op_sget_object.S */ +/* File: mips64/op_sget.S */ + /* + * General SGET handler wrapper. + * + * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short + */ + /* op vAA, field//BBBB */ + .extern artGetObjStaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + move a2, rSELF + jal artGetObjStaticFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + srl a2, rINST, 8 # a2 <- AA + + PREFETCH_INST 2 + bnez a3, MterpException # bail out + .if 1 + SET_VREG_OBJECT v0, a2 # fp[AA] <- v0 + .else + SET_VREG v0, a2 # fp[AA] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_sget_boolean: /* 0x63 */ +/* File: mips64/op_sget_boolean.S */ +/* File: mips64/op_sget.S */ + /* + * General SGET handler wrapper. 
+ * + * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short + */ + /* op vAA, field//BBBB */ + .extern artGetBooleanStaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + move a2, rSELF + jal artGetBooleanStaticFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + srl a2, rINST, 8 # a2 <- AA + and v0, v0, 0xff + PREFETCH_INST 2 + bnez a3, MterpException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[AA] <- v0 + .else + SET_VREG v0, a2 # fp[AA] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_sget_byte: /* 0x64 */ +/* File: mips64/op_sget_byte.S */ +/* File: mips64/op_sget.S */ + /* + * General SGET handler wrapper. + * + * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short + */ + /* op vAA, field//BBBB */ + .extern artGetByteStaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + move a2, rSELF + jal artGetByteStaticFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + srl a2, rINST, 8 # a2 <- AA + seb v0, v0 + PREFETCH_INST 2 + bnez a3, MterpException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[AA] <- v0 + .else + SET_VREG v0, a2 # fp[AA] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_sget_char: /* 0x65 */ +/* File: mips64/op_sget_char.S */ +/* File: mips64/op_sget.S */ + /* + * General SGET handler wrapper. 
+ * + * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short + */ + /* op vAA, field//BBBB */ + .extern artGetCharStaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + move a2, rSELF + jal artGetCharStaticFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + srl a2, rINST, 8 # a2 <- AA + and v0, v0, 0xffff + PREFETCH_INST 2 + bnez a3, MterpException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[AA] <- v0 + .else + SET_VREG v0, a2 # fp[AA] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_sget_short: /* 0x66 */ +/* File: mips64/op_sget_short.S */ +/* File: mips64/op_sget.S */ + /* + * General SGET handler wrapper. + * + * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short + */ + /* op vAA, field//BBBB */ + .extern artGetShortStaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + move a2, rSELF + jal artGetShortStaticFromCode + ld a3, THREAD_EXCEPTION_OFFSET(rSELF) + srl a2, rINST, 8 # a2 <- AA + seh v0, v0 + PREFETCH_INST 2 + bnez a3, MterpException # bail out + .if 0 + SET_VREG_OBJECT v0, a2 # fp[AA] <- v0 + .else + SET_VREG v0, a2 # fp[AA] <- v0 + .endif + ADVANCE 2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_sput: /* 0x67 */ +/* File: mips64/op_sput.S */ + /* + * General SPUT handler wrapper. 
+ * + * for: sput, sput-boolean, sput-byte, sput-char, sput-short + */ + /* op vAA, field//BBBB */ + .extern artSet32StaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + srl a3, rINST, 8 # a3 <- AA + GET_VREG a1, a3 # a1 <- fp[AA] + ld a2, OFF_FP_METHOD(rFP) + move a3, rSELF + PREFETCH_INST 2 # Get next inst, but don't advance rPC + jal artSet32StaticFromCode + bnezc v0, MterpException # 0 on success + ADVANCE 2 # Past exception point - now advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_sput_wide: /* 0x68 */ +/* File: mips64/op_sput_wide.S */ + /* + * SPUT_WIDE handler wrapper. + * + */ + /* sput-wide vAA, field//BBBB */ + .extern artSet64IndirectStaticFromMterp + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + ld a1, OFF_FP_METHOD(rFP) + srl a2, rINST, 8 # a2 <- AA + dlsa a2, a2, rFP, 2 + move a3, rSELF + PREFETCH_INST 2 # Get next inst, but don't advance rPC + jal artSet64IndirectStaticFromMterp + bnezc v0, MterpException # 0 on success, -1 on failure + ADVANCE 2 # Past exception point - now advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_sput_object: /* 0x69 */ +/* File: mips64/op_sput_object.S */ + .extern MterpSputObject + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + move a3, rSELF + jal MterpSputObject + beqzc v0, MterpException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_sput_boolean: /* 0x6a */ +/* File: mips64/op_sput_boolean.S */ +/* File: mips64/op_sput.S */ + /* + * General SPUT handler wrapper. 
+ * + * for: sput, sput-boolean, sput-byte, sput-char, sput-short + */ + /* op vAA, field//BBBB */ + .extern artSet8StaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + srl a3, rINST, 8 # a3 <- AA + GET_VREG a1, a3 # a1 <- fp[AA] + ld a2, OFF_FP_METHOD(rFP) + move a3, rSELF + PREFETCH_INST 2 # Get next inst, but don't advance rPC + jal artSet8StaticFromCode + bnezc v0, MterpException # 0 on success + ADVANCE 2 # Past exception point - now advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sput_byte: /* 0x6b */ +/* File: mips64/op_sput_byte.S */ +/* File: mips64/op_sput.S */ + /* + * General SPUT handler wrapper. + * + * for: sput, sput-boolean, sput-byte, sput-char, sput-short + */ + /* op vAA, field//BBBB */ + .extern artSet8StaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + srl a3, rINST, 8 # a3 <- AA + GET_VREG a1, a3 # a1 <- fp[AA] + ld a2, OFF_FP_METHOD(rFP) + move a3, rSELF + PREFETCH_INST 2 # Get next inst, but don't advance rPC + jal artSet8StaticFromCode + bnezc v0, MterpException # 0 on success + ADVANCE 2 # Past exception point - now advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sput_char: /* 0x6c */ +/* File: mips64/op_sput_char.S */ +/* File: mips64/op_sput.S */ + /* + * General SPUT handler wrapper. 
+ * + * for: sput, sput-boolean, sput-byte, sput-char, sput-short + */ + /* op vAA, field//BBBB */ + .extern artSet16StaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + srl a3, rINST, 8 # a3 <- AA + GET_VREG a1, a3 # a1 <- fp[AA] + ld a2, OFF_FP_METHOD(rFP) + move a3, rSELF + PREFETCH_INST 2 # Get next inst, but don't advance rPC + jal artSet16StaticFromCode + bnezc v0, MterpException # 0 on success + ADVANCE 2 # Past exception point - now advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sput_short: /* 0x6d */ +/* File: mips64/op_sput_short.S */ +/* File: mips64/op_sput.S */ + /* + * General SPUT handler wrapper. + * + * for: sput, sput-boolean, sput-byte, sput-char, sput-short + */ + /* op vAA, field//BBBB */ + .extern artSet16StaticFromCode + EXPORT_PC + lhu a0, 2(rPC) # a0 <- field ref BBBB + srl a3, rINST, 8 # a3 <- AA + GET_VREG a1, a3 # a1 <- fp[AA] + ld a2, OFF_FP_METHOD(rFP) + move a3, rSELF + PREFETCH_INST 2 # Get next inst, but don't advance rPC + jal artSet16StaticFromCode + bnezc v0, MterpException # 0 on success + ADVANCE 2 # Past exception point - now advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_virtual: /* 0x6e */ +/* File: mips64/op_invoke_virtual.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. 
+ */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeVirtual + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeVirtual + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + /* + * Handle a virtual method call. + * + * for: invoke-virtual, invoke-virtual/range + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_super: /* 0x6f */ +/* File: mips64/op_invoke_super.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeSuper + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeSuper + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + /* + * Handle a "super" method call. + * + * for: invoke-super, invoke-super/range + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_direct: /* 0x70 */ +/* File: mips64/op_invoke_direct.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. 
+ */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeDirect + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeDirect + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_static: /* 0x71 */ +/* File: mips64/op_invoke_static.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeStatic + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeStatic + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_interface: /* 0x72 */ +/* File: mips64/op_invoke_interface.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeInterface + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeInterface + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + /* + * Handle an interface method call. 
+ * + * for: invoke-interface, invoke-interface/range + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + +/* ------------------------------ */ + .balign 128 +.L_op_return_void_no_barrier: /* 0x73 */ +/* File: mips64/op_return_void_no_barrier.S */ + .extern MterpSuspendCheck + lw ra, THREAD_FLAGS_OFFSET(rSELF) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, 1f + jal MterpSuspendCheck # (self) +1: + li a0, 0 + b MterpReturn + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_virtual_range: /* 0x74 */ +/* File: mips64/op_invoke_virtual_range.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeVirtualRange + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeVirtualRange + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_super_range: /* 0x75 */ +/* File: mips64/op_invoke_super_range.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. 
+ */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeSuperRange + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeSuperRange + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_direct_range: /* 0x76 */ +/* File: mips64/op_invoke_direct_range.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeDirectRange + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeDirectRange + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_static_range: /* 0x77 */ +/* File: mips64/op_invoke_static_range.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeStaticRange + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeStaticRange + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_interface_range: /* 0x78 */ +/* File: mips64/op_invoke_interface_range.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. 
+ */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeInterfaceRange + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeInterfaceRange + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_79: /* 0x79 */ +/* File: mips64/op_unused_79.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_7a: /* 0x7a */ +/* File: mips64/op_unused_7a.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_neg_int: /* 0x7b */ +/* File: mips64/op_neg_int.S */ +/* File: mips64/unop.S */ + /* + * Generic 32-bit unary operation. Provide an "instr" line that + * specifies an instruction that performs "a0 = op a0". + * + * for: int-to-byte, int-to-char, int-to-short, + * not-int, neg-int + */ + /* unop vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + ext a2, rINST, 8, 4 # a2 <- A + # optional op + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + subu a0, zero, a0 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_not_int: /* 0x7c */ +/* File: mips64/op_not_int.S */ +/* File: mips64/unop.S */ + /* + * Generic 32-bit unary operation. Provide an "instr" line that + * specifies an instruction that performs "a0 = op a0". 
+ * + * for: int-to-byte, int-to-char, int-to-short, + * not-int, neg-int + */ + /* unop vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + ext a2, rINST, 8, 4 # a2 <- A + # optional op + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + nor a0, zero, a0 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_neg_long: /* 0x7d */ +/* File: mips64/op_neg_long.S */ +/* File: mips64/unopWide.S */ + /* + * Generic 64-bit unary operation. Provide an "instr" line that + * specifies an instruction that performs "a0 = op a0". + * + * For: not-long, neg-long + */ + /* unop vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a3 # a0 <- vB + ext a2, rINST, 8, 4 # a2 <- A + # optional op + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + dsubu a0, zero, a0 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_not_long: /* 0x7e */ +/* File: mips64/op_not_long.S */ +/* File: mips64/unopWide.S */ + /* + * Generic 64-bit unary operation. Provide an "instr" line that + * specifies an instruction that performs "a0 = op a0". + * + * For: not-long, neg-long + */ + /* unop vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a3 # a0 <- vB + ext a2, rINST, 8, 4 # a2 <- A + # optional op + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + nor a0, zero, a0 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_neg_float: /* 0x7f */ +/* File: mips64/op_neg_float.S */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. 
Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_FLOAT f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + neg.s f0, f0 +/* File: mips64/fcvtFooter.S */ + /* + * Stores a specified register containing the result of conversion + * from or to a floating-point type and jumps to the next instruction. + * + * Expects a1 to contain the destination Dalvik register number. + * a1 is set up by fcvtHeader.S. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + * + * Note that this file can't be included after a break in other files + * and in those files its contents appear as a copy. + * See: float-to-int, float-to-long, double-to-int, double-to-long. + */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a1 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_neg_double: /* 0x80 */ +/* File: mips64/op_neg_double.S */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. 
+ */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_DOUBLE f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + neg.d f0, f0 +/* File: mips64/fcvtFooter.S */ + /* + * Stores a specified register containing the result of conversion + * from or to a floating-point type and jumps to the next instruction. + * + * Expects a1 to contain the destination Dalvik register number. + * a1 is set up by fcvtHeader.S. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + * + * Note that this file can't be included after a break in other files + * and in those files its contents appear as a copy. + * See: float-to-int, float-to-long, double-to-int, double-to-long. + */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a1 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_int_to_long: /* 0x81 */ +/* File: mips64/op_int_to_long.S */ + /* int-to-long vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB (sign-extended to 64 bits) + ext a2, rINST, 8, 4 # a2 <- A + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- vB + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_int_to_float: /* 0x82 */ +/* File: mips64/op_int_to_float.S */ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. + */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. 
a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_FLOAT f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + cvt.s.w f0, f0 +/* File: mips64/fcvtFooter.S */ + /* + * Stores a specified register containing the result of conversion + * from or to a floating-point type and jumps to the next instruction. + * + * Expects a1 to contain the destination Dalvik register number. + * a1 is set up by fcvtHeader.S. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + * + * Note that this file can't be included after a break in other files + * and in those files its contents appear as a copy. + * See: float-to-int, float-to-long, double-to-int, double-to-long. + */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a1 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_int_to_double: /* 0x83 */ +/* File: mips64/op_int_to_double.S */ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. + */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. 
+ * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_FLOAT f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + cvt.d.w f0, f0 +/* File: mips64/fcvtFooter.S */ + /* + * Stores a specified register containing the result of conversion + * from or to a floating-point type and jumps to the next instruction. + * + * Expects a1 to contain the destination Dalvik register number. + * a1 is set up by fcvtHeader.S. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + * + * Note that this file can't be included after a break in other files + * and in those files its contents appear as a copy. + * See: float-to-int, float-to-long, double-to-int, double-to-long. + */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a1 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_long_to_int: /* 0x84 */ +/* File: mips64/op_long_to_int.S */ +/* we ignore the high word, making this equivalent to a 32-bit reg move */ +/* File: mips64/op_move.S */ + /* for move, move-object, long-to-int */ + /* op vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_VREG a0, a3 # a0 <- vB + GET_INST_OPCODE v0 # extract opcode from rINST + .if 0 + SET_VREG_OBJECT a0, a2 # vA <- vB + .else + SET_VREG a0, a2 # vA <- vB + .endif + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_long_to_float: /* 0x85 */ +/* File: mips64/op_long_to_float.S */ + /* + * Conversion from or to floating-point happens in a floating-point register. 
+ * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. + */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_DOUBLE f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + cvt.s.l f0, f0 +/* File: mips64/fcvtFooter.S */ + /* + * Stores a specified register containing the result of conversion + * from or to a floating-point type and jumps to the next instruction. + * + * Expects a1 to contain the destination Dalvik register number. + * a1 is set up by fcvtHeader.S. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + * + * Note that this file can't be included after a break in other files + * and in those files its contents appear as a copy. + * See: float-to-int, float-to-long, double-to-int, double-to-long. + */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a1 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_long_to_double: /* 0x86 */ +/* File: mips64/op_long_to_double.S */ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. + */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. 
Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_DOUBLE f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + cvt.d.l f0, f0 +/* File: mips64/fcvtFooter.S */ + /* + * Stores a specified register containing the result of conversion + * from or to a floating-point type and jumps to the next instruction. + * + * Expects a1 to contain the destination Dalvik register number. + * a1 is set up by fcvtHeader.S. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + * + * Note that this file can't be included after a break in other files + * and in those files its contents appear as a copy. + * See: float-to-int, float-to-long, double-to-int, double-to-long. + */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a1 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_float_to_int: /* 0x87 */ +/* File: mips64/op_float_to_int.S */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. 
+ */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_FLOAT f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + /* + * TODO: simplify this when the MIPS64R6 emulator + * supports NAN2008=1. + */ + li t0, INT_MIN_AS_FLOAT + mtc1 t0, f1 + cmp.le.s f1, f1, f0 + bc1nez f1, .Lop_float_to_int_trunc + cmp.eq.s f1, f0, f0 + li t0, INT_MIN + mfc1 t1, f1 + and t0, t0, t1 + b .Lop_float_to_int_done + +/* ------------------------------ */ + .balign 128 +.L_op_float_to_long: /* 0x88 */ +/* File: mips64/op_float_to_long.S */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_FLOAT f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + /* + * TODO: simplify this when the MIPS64R6 emulator + * supports NAN2008=1. + */ + li t0, LONG_MIN_AS_FLOAT + mtc1 t0, f1 + cmp.le.s f1, f1, f0 + bc1nez f1, .Lop_float_to_long_trunc + cmp.eq.s f1, f0, f0 + dli t0, LONG_MIN + mfc1 t1, f1 + and t0, t0, t1 + b .Lop_float_to_long_done + +/* ------------------------------ */ + .balign 128 +.L_op_float_to_double: /* 0x89 */ +/* File: mips64/op_float_to_double.S */ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. + */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. 
a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_FLOAT f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + cvt.d.s f0, f0 +/* File: mips64/fcvtFooter.S */ + /* + * Stores a specified register containing the result of conversion + * from or to a floating-point type and jumps to the next instruction. + * + * Expects a1 to contain the destination Dalvik register number. + * a1 is set up by fcvtHeader.S. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + * + * Note that this file can't be included after a break in other files + * and in those files its contents appear as a copy. + * See: float-to-int, float-to-long, double-to-int, double-to-long. + */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a1 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_double_to_int: /* 0x8a */ +/* File: mips64/op_double_to_int.S */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. 
+ */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_DOUBLE f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + /* + * TODO: simplify this when the MIPS64R6 emulator + * supports NAN2008=1. + */ + dli t0, INT_MIN_AS_DOUBLE + dmtc1 t0, f1 + cmp.le.d f1, f1, f0 + bc1nez f1, .Lop_double_to_int_trunc + cmp.eq.d f1, f0, f0 + li t0, INT_MIN + mfc1 t1, f1 + and t0, t0, t1 + b .Lop_double_to_int_done + +/* ------------------------------ */ + .balign 128 +.L_op_double_to_long: /* 0x8b */ +/* File: mips64/op_double_to_long.S */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_DOUBLE f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + /* + * TODO: simplify this when the MIPS64R6 emulator + * supports NAN2008=1. + */ + dli t0, LONG_MIN_AS_DOUBLE + dmtc1 t0, f1 + cmp.le.d f1, f1, f0 + bc1nez f1, .Lop_double_to_long_trunc + cmp.eq.d f1, f0, f0 + dli t0, LONG_MIN + mfc1 t1, f1 + and t0, t0, t1 + b .Lop_double_to_long_done + +/* ------------------------------ */ + .balign 128 +.L_op_double_to_float: /* 0x8c */ +/* File: mips64/op_double_to_float.S */ + /* + * Conversion from or to floating-point happens in a floating-point register. + * Therefore we load the input and store the output into or from a + * floating-point register irrespective of the type. + */ +/* File: mips64/fcvtHeader.S */ + /* + * Loads a specified register from vB. Used primarily for conversions + * from or to a floating-point type. + * + * Sets up a1 = A and a2 = B. 
a2 is later used by fcvtFooter.S to + * store the result in vA and jump to the next instruction. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + */ + ext a1, rINST, 8, 4 # a1 <- A + srl a2, rINST, 12 # a2 <- B + GET_VREG_DOUBLE f0, a2 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + + cvt.s.d f0, f0 +/* File: mips64/fcvtFooter.S */ + /* + * Stores a specified register containing the result of conversion + * from or to a floating-point type and jumps to the next instruction. + * + * Expects a1 to contain the destination Dalvik register number. + * a1 is set up by fcvtHeader.S. + * + * For: int-to-float, int-to-double, long-to-float, long-to-double, + * float-to-int, float-to-long, float-to-double, double-to-int, + * double-to-long, double-to-float, neg-float, neg-double. + * + * Note that this file can't be included after a break in other files + * and in those files its contents appear as a copy. + * See: float-to-int, float-to-long, double-to-int, double-to-long. + */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a1 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_int_to_byte: /* 0x8d */ +/* File: mips64/op_int_to_byte.S */ +/* File: mips64/unop.S */ + /* + * Generic 32-bit unary operation. Provide an "instr" line that + * specifies an instruction that performs "a0 = op a0". 
+ * + * for: int-to-byte, int-to-char, int-to-short, + * not-int, neg-int + */ + /* unop vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + ext a2, rINST, 8, 4 # a2 <- A + # optional op + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + seb a0, a0 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_int_to_char: /* 0x8e */ +/* File: mips64/op_int_to_char.S */ +/* File: mips64/unop.S */ + /* + * Generic 32-bit unary operation. Provide an "instr" line that + * specifies an instruction that performs "a0 = op a0". + * + * for: int-to-byte, int-to-char, int-to-short, + * not-int, neg-int + */ + /* unop vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + ext a2, rINST, 8, 4 # a2 <- A + # optional op + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + and a0, a0, 0xffff # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_int_to_short: /* 0x8f */ +/* File: mips64/op_int_to_short.S */ +/* File: mips64/unop.S */ + /* + * Generic 32-bit unary operation. Provide an "instr" line that + * specifies an instruction that performs "a0 = op a0". 
+ * + * for: int-to-byte, int-to-char, int-to-short, + * not-int, neg-int + */ + /* unop vA, vB */ + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + ext a2, rINST, 8, 4 # a2 <- A + # optional op + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + seh a0, a0 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_add_int: /* 0x90 */ +/* File: mips64/op_add_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + addu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sub_int: /* 0x91 */ +/* File: mips64/op_sub_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". 
+ * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + subu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_int: /* 0x92 */ +/* File: mips64/op_mul_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + mul a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_div_int: /* 0x93 */ +/* File: mips64/op_div_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + div a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_int: /* 0x94 */ +/* File: mips64/op_rem_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). 
Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + mod a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_and_int: /* 0x95 */ +/* File: mips64/op_and_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + and a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_or_int: /* 0x96 */ +/* File: mips64/op_or_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + or a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_xor_int: /* 0x97 */ +/* File: mips64/op_xor_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). 
Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + xor a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_shl_int: /* 0x98 */ +/* File: mips64/op_shl_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + sll a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_shr_int: /* 0x99 */ +/* File: mips64/op_shr_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + sra a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_ushr_int: /* 0x9a */ +/* File: mips64/op_ushr_int.S */ +/* File: mips64/binop.S */ + /* + * Generic 32-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). 
Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, + * xor-int, shl-int, shr-int, ushr-int + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG a0, a2 # a0 <- vBB + GET_VREG a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + srl a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_add_long: /* 0x9b */ +/* File: mips64/op_add_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + daddu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sub_long: /* 0x9c */ +/* File: mips64/op_sub_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + dsubu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_long: /* 0x9d */ +/* File: mips64/op_mul_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) 
+ * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + dmul a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_div_long: /* 0x9e */ +/* File: mips64/op_div_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + ddiv a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_long: /* 0x9f */ +/* File: mips64/op_rem_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + dmod a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_and_long: /* 0xa0 */ +/* File: mips64/op_and_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) 
+ * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + and a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_or_long: /* 0xa1 */ +/* File: mips64/op_or_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + or a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_xor_long: /* 0xa2 */ +/* File: mips64/op_xor_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + xor a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_shl_long: /* 0xa3 */ +/* File: mips64/op_shl_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) 
+ * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + dsll a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_shr_long: /* 0xa4 */ +/* File: mips64/op_shr_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + dsra a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_ushr_long: /* 0xa5 */ +/* File: mips64/op_ushr_long.S */ +/* File: mips64/binopWide.S */ + /* + * Generic 64-bit binary operation. Provide an "instr" line that + * specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vCC (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long, + * xor-long, shl-long, shr-long, ushr-long + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_WIDE a0, a2 # a0 <- vBB + GET_VREG_WIDE a1, a3 # a1 <- vCC + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + dsrl a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a4 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_add_float: /* 0xa6 */ +/* File: mips64/op_add_float.S */ +/* File: mips64/fbinop.S */ + /*: + * Generic 32-bit floating-point operation. + * + * For: add-float, sub-float, mul-float, div-float. 
+ * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f0, a2 # f0 <- vBB + GET_VREG_FLOAT f1, a3 # f1 <- vCC + add.s f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sub_float: /* 0xa7 */ +/* File: mips64/op_sub_float.S */ +/* File: mips64/fbinop.S */ + /*: + * Generic 32-bit floating-point operation. + * + * For: add-float, sub-float, mul-float, div-float. + * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f0, a2 # f0 <- vBB + GET_VREG_FLOAT f1, a3 # f1 <- vCC + sub.s f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_float: /* 0xa8 */ +/* File: mips64/op_mul_float.S */ +/* File: mips64/fbinop.S */ + /*: + * Generic 32-bit floating-point operation. + * + * For: add-float, sub-float, mul-float, div-float. 
+ * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f0, a2 # f0 <- vBB + GET_VREG_FLOAT f1, a3 # f1 <- vCC + mul.s f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_div_float: /* 0xa9 */ +/* File: mips64/op_div_float.S */ +/* File: mips64/fbinop.S */ + /*: + * Generic 32-bit floating-point operation. + * + * For: add-float, sub-float, mul-float, div-float. + * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f0, a2 # f0 <- vBB + GET_VREG_FLOAT f1, a3 # f1 <- vCC + div.s f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_float: /* 0xaa */ +/* File: mips64/op_rem_float.S */ + /* rem-float vAA, vBB, vCC */ + .extern fmodf + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_FLOAT f12, a2 # f12 <- vBB + GET_VREG_FLOAT f13, a3 # f13 <- vCC + jal fmodf # f0 <- f12 op f13 + srl a4, rINST, 8 # a4 <- AA + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_add_double: /* 0xab */ +/* File: mips64/op_add_double.S */ +/* File: mips64/fbinopWide.S */ + /*: + * Generic 64-bit floating-point operation. + * + * For: add-double, sub-double, mul-double, div-double. 
+ * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f0, a2 # f0 <- vBB + GET_VREG_DOUBLE f1, a3 # f1 <- vCC + add.d f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sub_double: /* 0xac */ +/* File: mips64/op_sub_double.S */ +/* File: mips64/fbinopWide.S */ + /*: + * Generic 64-bit floating-point operation. + * + * For: add-double, sub-double, mul-double, div-double. + * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f0, a2 # f0 <- vBB + GET_VREG_DOUBLE f1, a3 # f1 <- vCC + sub.d f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_double: /* 0xad */ +/* File: mips64/op_mul_double.S */ +/* File: mips64/fbinopWide.S */ + /*: + * Generic 64-bit floating-point operation. + * + * For: add-double, sub-double, mul-double, div-double. 
+ * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f0, a2 # f0 <- vBB + GET_VREG_DOUBLE f1, a3 # f1 <- vCC + mul.d f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_div_double: /* 0xae */ +/* File: mips64/op_div_double.S */ +/* File: mips64/fbinopWide.S */ + /*: + * Generic 64-bit floating-point operation. + * + * For: add-double, sub-double, mul-double, div-double. + * form: <op> f0, f0, f1 + */ + /* binop vAA, vBB, vCC */ + srl a4, rINST, 8 # a4 <- AA + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f0, a2 # f0 <- vBB + GET_VREG_DOUBLE f1, a3 # f1 <- vCC + div.d f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_double: /* 0xaf */ +/* File: mips64/op_rem_double.S */ + /* rem-double vAA, vBB, vCC */ + .extern fmod + lbu a2, 2(rPC) # a2 <- BB + lbu a3, 3(rPC) # a3 <- CC + GET_VREG_DOUBLE f12, a2 # f12 <- vBB + GET_VREG_DOUBLE f13, a3 # f13 <- vCC + jal fmod # f0 <- f12 op f13 + srl a4, rINST, 8 # a4 <- AA + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a4 # vAA <- f0 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_add_int_2addr: /* 0xb0 */ +/* File: mips64/op_add_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". 
+ * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + addu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sub_int_2addr: /* 0xb1 */ +/* File: mips64/op_sub_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. 
+ * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + subu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_int_2addr: /* 0xb2 */ +/* File: mips64/op_mul_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + mul a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_div_int_2addr: /* 0xb3 */ +/* File: mips64/op_div_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + div a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_int_2addr: /* 0xb4 */ +/* File: mips64/op_rem_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. 
(If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + mod a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_and_int_2addr: /* 0xb5 */ +/* File: mips64/op_and_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. 
+ * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + and a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_or_int_2addr: /* 0xb6 */ +/* File: mips64/op_or_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + or a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_xor_int_2addr: /* 0xb7 */ +/* File: mips64/op_xor_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + xor a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_shl_int_2addr: /* 0xb8 */ +/* File: mips64/op_shl_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. 
(If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + sll a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_shr_int_2addr: /* 0xb9 */ +/* File: mips64/op_shr_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. 
+ * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + sra a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_ushr_int_2addr: /* 0xba */ +/* File: mips64/op_ushr_int_2addr.S */ +/* File: mips64/binop2addr.S */ + /* + * Generic 32-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (INT_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr, + * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr, + * shl-int/2addr, shr-int/2addr, ushr-int/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a2 # a0 <- vA + GET_VREG a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + srl a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_add_long_2addr: /* 0xbb */ +/* File: mips64/op_add_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + daddu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sub_long_2addr: /* 0xbc */ +/* File: mips64/op_sub_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. 
(If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + dsubu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_long_2addr: /* 0xbd */ +/* File: mips64/op_mul_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. 
+ * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + dmul a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_div_long_2addr: /* 0xbe */ +/* File: mips64/op_div_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + ddiv a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_long_2addr: /* 0xbf */ +/* File: mips64/op_rem_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + dmod a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_and_long_2addr: /* 0xc0 */ +/* File: mips64/op_and_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. 
(If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + and a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_or_long_2addr: /* 0xc1 */ +/* File: mips64/op_or_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. 
+ * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + or a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_xor_long_2addr: /* 0xc2 */ +/* File: mips64/op_xor_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + xor a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_shl_long_2addr: /* 0xc3 */ +/* File: mips64/op_shl_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + dsll a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_shr_long_2addr: /* 0xc4 */ +/* File: mips64/op_shr_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. 
(If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. + * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + dsra a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_ushr_long_2addr: /* 0xc5 */ +/* File: mips64/op_ushr_long_2addr.S */ +/* File: mips64/binopWide2addr.S */ + /* + * Generic 64-bit "/2addr" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be a MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * vB (a1). Useful for integer division and modulus. Note that we + * *don't* check for (LONG_MIN / -1) here, because the CPU handles it + * correctly. 
+ * + * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr, + * rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr, + * shl-long/2addr, shr-long/2addr, ushr-long/2addr + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_WIDE a0, a2 # a0 <- vA + GET_VREG_WIDE a1, a3 # a1 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + # optional op + dsrl a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_add_float_2addr: /* 0xc6 */ +/* File: mips64/op_add_float_2addr.S */ +/* File: mips64/fbinop2addr.S */ + /*: + * Generic 32-bit "/2addr" floating-point operation. + * + * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr. + * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_FLOAT f0, a2 # f0 <- vA + GET_VREG_FLOAT f1, a3 # f1 <- vB + add.s f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sub_float_2addr: /* 0xc7 */ +/* File: mips64/op_sub_float_2addr.S */ +/* File: mips64/fbinop2addr.S */ + /*: + * Generic 32-bit "/2addr" floating-point operation. + * + * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr. 
+ * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_FLOAT f0, a2 # f0 <- vA + GET_VREG_FLOAT f1, a3 # f1 <- vB + sub.s f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_float_2addr: /* 0xc8 */ +/* File: mips64/op_mul_float_2addr.S */ +/* File: mips64/fbinop2addr.S */ + /*: + * Generic 32-bit "/2addr" floating-point operation. + * + * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr. + * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_FLOAT f0, a2 # f0 <- vA + GET_VREG_FLOAT f1, a3 # f1 <- vB + mul.s f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_div_float_2addr: /* 0xc9 */ +/* File: mips64/op_div_float_2addr.S */ +/* File: mips64/fbinop2addr.S */ + /*: + * Generic 32-bit "/2addr" floating-point operation. + * + * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr. 
+ * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_FLOAT f0, a2 # f0 <- vA + GET_VREG_FLOAT f1, a3 # f1 <- vB + div.s f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_float_2addr: /* 0xca */ +/* File: mips64/op_rem_float_2addr.S */ + /* rem-float/2addr vA, vB */ + .extern fmodf + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_FLOAT f12, a2 # f12 <- vA + GET_VREG_FLOAT f13, a3 # f13 <- vB + jal fmodf # f0 <- f12 op f13 + ext a2, rINST, 8, 4 # a2 <- A + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_FLOAT f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_add_double_2addr: /* 0xcb */ +/* File: mips64/op_add_double_2addr.S */ +/* File: mips64/fbinopWide2addr.S */ + /*: + * Generic 64-bit "/2addr" floating-point operation. + * + * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr. + * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_DOUBLE f0, a2 # f0 <- vA + GET_VREG_DOUBLE f1, a3 # f1 <- vB + add.d f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_sub_double_2addr: /* 0xcc */ +/* File: mips64/op_sub_double_2addr.S */ +/* File: mips64/fbinopWide2addr.S */ + /*: + * Generic 64-bit "/2addr" floating-point operation. 
+ * + * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr. + * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_DOUBLE f0, a2 # f0 <- vA + GET_VREG_DOUBLE f1, a3 # f1 <- vB + sub.d f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_double_2addr: /* 0xcd */ +/* File: mips64/op_mul_double_2addr.S */ +/* File: mips64/fbinopWide2addr.S */ + /*: + * Generic 64-bit "/2addr" floating-point operation. + * + * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr. + * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_DOUBLE f0, a2 # f0 <- vA + GET_VREG_DOUBLE f1, a3 # f1 <- vB + mul.d f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_div_double_2addr: /* 0xce */ +/* File: mips64/op_div_double_2addr.S */ +/* File: mips64/fbinopWide2addr.S */ + /*: + * Generic 64-bit "/2addr" floating-point operation. + * + * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr. 
+ * form: <op> f0, f0, f1 + */ + /* binop/2addr vA, vB */ + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_DOUBLE f0, a2 # f0 <- vA + GET_VREG_DOUBLE f1, a3 # f1 <- vB + div.d f0, f0, f1 # f0 <- f0 op f1 + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_double_2addr: /* 0xcf */ +/* File: mips64/op_rem_double_2addr.S */ + /* rem-double/2addr vA, vB */ + .extern fmod + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG_DOUBLE f12, a2 # f12 <- vA + GET_VREG_DOUBLE f13, a3 # f13 <- vB + jal fmod # f0 <- f12 op f13 + ext a2, rINST, 8, 4 # a2 <- A + FETCH_ADVANCE_INST 1 # advance rPC, load rINST + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_DOUBLE f0, a2 # vA <- f0 + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_add_int_lit16: /* 0xd0 */ +/* File: mips64/op_add_int_lit16.S */ +/* File: mips64/binopLit16.S */ + /* + * Generic 32-bit "lit16" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CCCC (a1). Useful for integer division and modulus. + * + * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, + * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 + */ + /* binop/lit16 vA, vB, #+CCCC */ + lh a1, 2(rPC) # a1 <- sign-extended CCCC + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + addu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_rsub_int: /* 0xd1 */ +/* File: mips64/op_rsub_int.S */ +/* File: mips64/binopLit16.S */ + /* + * Generic 32-bit "lit16" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CCCC (a1). Useful for integer division and modulus. + * + * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, + * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 + */ + /* binop/lit16 vA, vB, #+CCCC */ + lh a1, 2(rPC) # a1 <- sign-extended CCCC + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + subu a0, a1, a0 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_int_lit16: /* 0xd2 */ +/* File: mips64/op_mul_int_lit16.S */ +/* File: mips64/binopLit16.S */ + /* + * Generic 32-bit "lit16" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CCCC (a1). 
Useful for integer division and modulus. + * + * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, + * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 + */ + /* binop/lit16 vA, vB, #+CCCC */ + lh a1, 2(rPC) # a1 <- sign-extended CCCC + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + mul a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_div_int_lit16: /* 0xd3 */ +/* File: mips64/op_div_int_lit16.S */ +/* File: mips64/binopLit16.S */ + /* + * Generic 32-bit "lit16" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CCCC (a1). Useful for integer division and modulus. + * + * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, + * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 + */ + /* binop/lit16 vA, vB, #+CCCC */ + lh a1, 2(rPC) # a1 <- sign-extended CCCC + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + div a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_int_lit16: /* 0xd4 */ +/* File: mips64/op_rem_int_lit16.S */ +/* File: mips64/binopLit16.S */ + /* + * Generic 32-bit "lit16" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CCCC (a1). Useful for integer division and modulus. + * + * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, + * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 + */ + /* binop/lit16 vA, vB, #+CCCC */ + lh a1, 2(rPC) # a1 <- sign-extended CCCC + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + mod a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_and_int_lit16: /* 0xd5 */ +/* File: mips64/op_and_int_lit16.S */ +/* File: mips64/binopLit16.S */ + /* + * Generic 32-bit "lit16" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CCCC (a1). 
Useful for integer division and modulus. + * + * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, + * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 + */ + /* binop/lit16 vA, vB, #+CCCC */ + lh a1, 2(rPC) # a1 <- sign-extended CCCC + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + and a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_or_int_lit16: /* 0xd6 */ +/* File: mips64/op_or_int_lit16.S */ +/* File: mips64/binopLit16.S */ + /* + * Generic 32-bit "lit16" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CCCC (a1). Useful for integer division and modulus. + * + * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, + * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 + */ + /* binop/lit16 vA, vB, #+CCCC */ + lh a1, 2(rPC) # a1 <- sign-extended CCCC + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + or a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_xor_int_lit16: /* 0xd7 */ +/* File: mips64/op_xor_int_lit16.S */ +/* File: mips64/binopLit16.S */ + /* + * Generic 32-bit "lit16" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CCCC (a1). Useful for integer division and modulus. + * + * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16, + * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16 + */ + /* binop/lit16 vA, vB, #+CCCC */ + lh a1, 2(rPC) # a1 <- sign-extended CCCC + ext a2, rINST, 8, 4 # a2 <- A + ext a3, rINST, 12, 4 # a3 <- B + GET_VREG a0, a3 # a0 <- vB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + xor a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_add_int_lit8: /* 0xd8 */ +/* File: mips64/op_add_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). 
Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + addu a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_rsub_int_lit8: /* 0xd9 */ +/* File: mips64/op_rsub_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + subu a0, a1, a0 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_mul_int_lit8: /* 0xda */ +/* File: mips64/op_mul_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + mul a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_div_int_lit8: /* 0xdb */ +/* File: mips64/op_div_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). 
Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + div a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_rem_int_lit8: /* 0xdc */ +/* File: mips64/op_rem_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 1 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + mod a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_and_int_lit8: /* 0xdd */ +/* File: mips64/op_and_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + and a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_or_int_lit8: /* 0xde */ +/* File: mips64/op_or_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). 
Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + or a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_xor_int_lit8: /* 0xdf */ +/* File: mips64/op_xor_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + xor a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_shl_int_lit8: /* 0xe0 */ +/* File: mips64/op_shl_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + sll a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_shr_int_lit8: /* 0xe1 */ +/* File: mips64/op_shr_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). 
Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? + .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + sra a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_ushr_int_lit8: /* 0xe2 */ +/* File: mips64/op_ushr_int_lit8.S */ +/* File: mips64/binopLit8.S */ + /* + * Generic 32-bit "lit8" binary operation. Provide an "instr" line + * that specifies an instruction that performs "result = a0 op a1". + * This could be an MIPS instruction or a function call. (If the result + * comes back in a register other than a0, you can override "result".) + * + * If "chkzero" is set to 1, we perform a divide-by-zero check on + * CC (a1). Useful for integer division and modulus. + * + * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, + * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, + * shl-int/lit8, shr-int/lit8, ushr-int/lit8 + */ + /* binop/lit8 vAA, vBB, #+CC */ + lbu a3, 2(rPC) # a3 <- BB + lb a1, 3(rPC) # a1 <- sign-extended CC + srl a2, rINST, 8 # a2 <- AA + GET_VREG a0, a3 # a0 <- vBB + .if 0 + beqz a1, common_errDivideByZero # is second operand zero? 
+ .endif + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + # optional op + srl a0, a0, a1 # a0 <- op, a0-a3 changed + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG a0, a2 # vAA <- a0 + GOTO_OPCODE v0 # jump to next instruction + + + +/* ------------------------------ */ + .balign 128 +.L_op_iget_quick: /* 0xe3 */ +/* File: mips64/op_iget_quick.S */ + /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- object we're operating on + ext a4, rINST, 8, 4 # a4 <- A + daddu a1, a1, a3 + beqz a3, common_errNullObject # object was null + lw a0, 0(a1) # a0 <- obj.field + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG a0, a4 # fp[A] <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iget_wide_quick: /* 0xe4 */ +/* File: mips64/op_iget_wide_quick.S */ + /* iget-wide-quick vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a4, 2(rPC) # a4 <- field byte offset + GET_VREG_U a3, a2 # a3 <- object we're operating on + ext a2, rINST, 8, 4 # a2 <- A + beqz a3, common_errNullObject # object was null + daddu a4, a3, a4 # create direct pointer + lw a0, 0(a4) + lw a1, 4(a4) + dinsu a0, a1, 32, 32 + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG_WIDE a0, a2 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iget_object_quick: /* 0xe5 */ +/* File: mips64/op_iget_object_quick.S */ + /* For: iget-object-quick */ + /* op vA, vB, offset//CCCC */ + .extern artIGetObjectFromMterp + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + EXPORT_PC + GET_VREG_U a0, a2 # a0 <- object we're operating on + jal artIGetObjectFromMterp # (obj, offset) + ld a3, 
THREAD_EXCEPTION_OFFSET(rSELF) + ext a2, rINST, 8, 4 # a2 <- A + PREFETCH_INST 2 + bnez a3, MterpPossibleException # bail out + SET_VREG_OBJECT v0, a2 # fp[A] <- v0 + ADVANCE 2 # advance rPC + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iput_quick: /* 0xe6 */ +/* File: mips64/op_iput_quick.S */ + /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + beqz a3, common_errNullObject # object was null + GET_VREG a0, a2 # a0 <- fp[A] + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + daddu a1, a1, a3 + sw a0, 0(a1) # obj.field <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iput_wide_quick: /* 0xe7 */ +/* File: mips64/op_iput_wide_quick.S */ + /* iput-wide-quick vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a3, 2(rPC) # a3 <- field byte offset + GET_VREG_U a2, a2 # a2 <- fp[B], the object pointer + ext a0, rINST, 8, 4 # a0 <- A + beqz a2, common_errNullObject # object was null + GET_VREG_WIDE a0, a0 # a0 <- fp[A] + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + daddu a1, a2, a3 # create a direct pointer + sw a0, 0(a1) + dsrl32 a0, a0, 0 + sw a0, 4(a1) + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_iput_object_quick: /* 0xe8 */ +/* File: mips64/op_iput_object_quick.S */ + .extern MterpIputObjectQuick + EXPORT_PC + daddu a0, rFP, OFF_FP_SHADOWFRAME + move a1, rPC + move a2, rINST + jal MterpIputObjectQuick + beqzc v0, MterpException + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + GET_INST_OPCODE v0 # 
extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_virtual_quick: /* 0xe9 */ +/* File: mips64/op_invoke_virtual_quick.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeVirtualQuick + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeVirtualQuick + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_virtual_range_quick: /* 0xea */ +/* File: mips64/op_invoke_virtual_range_quick.S */ +/* File: mips64/invoke.S */ + /* + * Generic invoke handler wrapper. + */ + /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ + /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ + .extern MterpInvokeVirtualQuickRange + .extern MterpShouldSwitchInterpreters + EXPORT_PC + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rPC + move a3, rINST + jal MterpInvokeVirtualQuickRange + beqzc v0, MterpException + FETCH_ADVANCE_INST 3 + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + + +/* ------------------------------ */ + .balign 128 +.L_op_iput_boolean_quick: /* 0xeb */ +/* File: mips64/op_iput_boolean_quick.S */ +/* File: mips64/op_iput_quick.S */ + /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + beqz a3, common_errNullObject # object was null + GET_VREG a0, a2 # a0 <- fp[A] + FETCH_ADVANCE_INST 2 # advance rPC, load 
rINST + daddu a1, a1, a3 + sb a0, 0(a1) # obj.field <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iput_byte_quick: /* 0xec */ +/* File: mips64/op_iput_byte_quick.S */ +/* File: mips64/op_iput_quick.S */ + /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + beqz a3, common_errNullObject # object was null + GET_VREG a0, a2 # a0 <- fp[A] + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + daddu a1, a1, a3 + sb a0, 0(a1) # obj.field <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iput_char_quick: /* 0xed */ +/* File: mips64/op_iput_char_quick.S */ +/* File: mips64/op_iput_quick.S */ + /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + beqz a3, common_errNullObject # object was null + GET_VREG a0, a2 # a0 <- fp[A] + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + daddu a1, a1, a3 + sh a0, 0(a1) # obj.field <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iput_short_quick: /* 0xee */ +/* File: mips64/op_iput_short_quick.S */ +/* File: mips64/op_iput_quick.S */ + /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U 
a3, a2 # a3 <- fp[B], the object pointer + ext a2, rINST, 8, 4 # a2 <- A + beqz a3, common_errNullObject # object was null + GET_VREG a0, a2 # a0 <- fp[A] + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + daddu a1, a1, a3 + sh a0, 0(a1) # obj.field <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iget_boolean_quick: /* 0xef */ +/* File: mips64/op_iget_boolean_quick.S */ +/* File: mips64/op_iget_quick.S */ + /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- object we're operating on + ext a4, rINST, 8, 4 # a4 <- A + daddu a1, a1, a3 + beqz a3, common_errNullObject # object was null + lbu a0, 0(a1) # a0 <- obj.field + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG a0, a4 # fp[A] <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iget_byte_quick: /* 0xf0 */ +/* File: mips64/op_iget_byte_quick.S */ +/* File: mips64/op_iget_quick.S */ + /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- object we're operating on + ext a4, rINST, 8, 4 # a4 <- A + daddu a1, a1, a3 + beqz a3, common_errNullObject # object was null + lb a0, 0(a1) # a0 <- obj.field + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG a0, a4 # fp[A] <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iget_char_quick: /* 0xf1 */ +/* File: mips64/op_iget_char_quick.S */ +/* File: mips64/op_iget_quick.S */ + /* For: 
iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- object we're operating on + ext a4, rINST, 8, 4 # a4 <- A + daddu a1, a1, a3 + beqz a3, common_errNullObject # object was null + lhu a0, 0(a1) # a0 <- obj.field + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG a0, a4 # fp[A] <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_iget_short_quick: /* 0xf2 */ +/* File: mips64/op_iget_short_quick.S */ +/* File: mips64/op_iget_quick.S */ + /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */ + /* op vA, vB, offset//CCCC */ + srl a2, rINST, 12 # a2 <- B + lhu a1, 2(rPC) # a1 <- field byte offset + GET_VREG_U a3, a2 # a3 <- object we're operating on + ext a4, rINST, 8, 4 # a4 <- A + daddu a1, a1, a3 + beqz a3, common_errNullObject # object was null + lh a0, 0(a1) # a0 <- obj.field + FETCH_ADVANCE_INST 2 # advance rPC, load rINST + SET_VREG a0, a4 # fp[A] <- a0 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + + +/* ------------------------------ */ + .balign 128 +.L_op_invoke_lambda: /* 0xf3 */ +/* Transfer stub to alternate interpreter */ + b MterpFallback + +/* ------------------------------ */ + .balign 128 +.L_op_unused_f4: /* 0xf4 */ +/* File: mips64/op_unused_f4.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. 
+ */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_capture_variable: /* 0xf5 */ +/* Transfer stub to alternate interpreter */ + b MterpFallback + +/* ------------------------------ */ + .balign 128 +.L_op_create_lambda: /* 0xf6 */ +/* Transfer stub to alternate interpreter */ + b MterpFallback + +/* ------------------------------ */ + .balign 128 +.L_op_liberate_variable: /* 0xf7 */ +/* Transfer stub to alternate interpreter */ + b MterpFallback + +/* ------------------------------ */ + .balign 128 +.L_op_box_lambda: /* 0xf8 */ +/* Transfer stub to alternate interpreter */ + b MterpFallback + +/* ------------------------------ */ + .balign 128 +.L_op_unbox_lambda: /* 0xf9 */ +/* Transfer stub to alternate interpreter */ + b MterpFallback + +/* ------------------------------ */ + .balign 128 +.L_op_unused_fa: /* 0xfa */ +/* File: mips64/op_unused_fa.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_fb: /* 0xfb */ +/* File: mips64/op_unused_fb.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_fc: /* 0xfc */ +/* File: mips64/op_unused_fc.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_fd: /* 0xfd */ +/* File: mips64/op_unused_fd.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_fe: /* 0xfe */ +/* File: mips64/op_unused_fe.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. 
+ */ + b MterpFallback + + +/* ------------------------------ */ + .balign 128 +.L_op_unused_ff: /* 0xff */ +/* File: mips64/op_unused_ff.S */ +/* File: mips64/unused.S */ +/* + * Bail to reference interpreter to throw. + */ + b MterpFallback + + + .balign 128 + .size artMterpAsmInstructionStart, .-artMterpAsmInstructionStart + .global artMterpAsmInstructionEnd +artMterpAsmInstructionEnd: + +/* + * =========================================================================== + * Sister implementations + * =========================================================================== + */ + .global artMterpAsmSisterStart + .type artMterpAsmSisterStart, %function + .text + .balign 4 +artMterpAsmSisterStart: + +/* continuation for op_float_to_int */ +.Lop_float_to_int_trunc: + trunc.w.s f0, f0 + mfc1 t0, f0 +.Lop_float_to_int_done: + /* Can't include fcvtFooter.S after break */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG t0, a1 + GOTO_OPCODE v0 # jump to next instruction + +/* continuation for op_float_to_long */ +.Lop_float_to_long_trunc: + trunc.l.s f0, f0 + dmfc1 t0, f0 +.Lop_float_to_long_done: + /* Can't include fcvtFooter.S after break */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE t0, a1 + GOTO_OPCODE v0 # jump to next instruction + +/* continuation for op_double_to_int */ +.Lop_double_to_int_trunc: + trunc.w.d f0, f0 + mfc1 t0, f0 +.Lop_double_to_int_done: + /* Can't include fcvtFooter.S after break */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG t0, a1 + GOTO_OPCODE v0 # jump to next instruction + +/* continuation for op_double_to_long */ +.Lop_double_to_long_trunc: + trunc.l.d f0, f0 + dmfc1 t0, f0 +.Lop_double_to_long_done: + /* Can't include fcvtFooter.S after break */ + GET_INST_OPCODE v0 # extract opcode from rINST + SET_VREG_WIDE t0, a1 + GOTO_OPCODE v0 # jump to next instruction + + .size artMterpAsmSisterStart, .-artMterpAsmSisterStart + .global artMterpAsmSisterEnd +artMterpAsmSisterEnd: + + + .global 
artMterpAsmAltInstructionStart + .type artMterpAsmAltInstructionStart, %function + .text + +artMterpAsmAltInstructionStart = .L_ALT_op_nop +/* ------------------------------ */ + .balign 128 +.L_ALT_op_nop: /* 0x00 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (0 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move: /* 0x01 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (1 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_from16: /* 0x02 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (2 * 128) # Addr of primary handler. 
+ jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_16: /* 0x03 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (3 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_wide: /* 0x04 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (4 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_wide_from16: /* 0x05 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (5 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_wide_16: /* 0x06 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (6 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_object: /* 0x07 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (7 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_object_from16: /* 0x08 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (8 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_object_16: /* 0x09 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (9 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_result: /* 0x0a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (10 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_result_wide: /* 0x0b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (11 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_result_object: /* 0x0c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (12 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_move_exception: /* 0x0d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (13 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_return_void: /* 0x0e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (14 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_return: /* 0x0f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (15 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_return_wide: /* 0x10 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (16 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_return_object: /* 0x11 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (17 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_4: /* 0x12 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (18 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_16: /* 0x13 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (19 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const: /* 0x14 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (20 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_high16: /* 0x15 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (21 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_wide_16: /* 0x16 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (22 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_wide_32: /* 0x17 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (23 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_wide: /* 0x18 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (24 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_wide_high16: /* 0x19 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (25 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_string: /* 0x1a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (26 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_string_jumbo: /* 0x1b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (27 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_const_class: /* 0x1c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (28 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_monitor_enter: /* 0x1d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (29 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_monitor_exit: /* 0x1e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (30 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_check_cast: /* 0x1f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (31 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_instance_of: /* 0x20 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (32 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_array_length: /* 0x21 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (33 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_new_instance: /* 0x22 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (34 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_new_array: /* 0x23 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (35 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_filled_new_array: /* 0x24 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (36 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_filled_new_array_range: /* 0x25 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (37 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_fill_array_data: /* 0x26 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (38 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_throw: /* 0x27 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (39 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_goto: /* 0x28 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (40 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_goto_16: /* 0x29 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (41 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_goto_32: /* 0x2a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (42 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_packed_switch: /* 0x2b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (43 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sparse_switch: /* 0x2c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (44 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_cmpl_float: /* 0x2d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (45 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_cmpg_float: /* 0x2e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (46 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_cmpl_double: /* 0x2f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (47 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_cmpg_double: /* 0x30 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (48 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_cmp_long: /* 0x31 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (49 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_eq: /* 0x32 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (50 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_ne: /* 0x33 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (51 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_lt: /* 0x34 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (52 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_ge: /* 0x35 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (53 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_gt: /* 0x36 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (54 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_le: /* 0x37 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (55 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_eqz: /* 0x38 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (56 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_nez: /* 0x39 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (57 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_ltz: /* 0x3a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (58 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_gez: /* 0x3b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (59 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_gtz: /* 0x3c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (60 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_if_lez: /* 0x3d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (61 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_3e: /* 0x3e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (62 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_3f: /* 0x3f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (63 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_40: /* 0x40 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (64 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_41: /* 0x41 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (65 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_42: /* 0x42 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (66 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_43: /* 0x43 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (67 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aget: /* 0x44 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (68 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aget_wide: /* 0x45 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (69 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aget_object: /* 0x46 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (70 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aget_boolean: /* 0x47 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (71 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aget_byte: /* 0x48 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (72 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aget_char: /* 0x49 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (73 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aget_short: /* 0x4a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (74 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aput: /* 0x4b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (75 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aput_wide: /* 0x4c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (76 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aput_object: /* 0x4d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (77 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aput_boolean: /* 0x4e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (78 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aput_byte: /* 0x4f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (79 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aput_char: /* 0x50 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (80 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_aput_short: /* 0x51 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (81 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget: /* 0x52 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (82 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_wide: /* 0x53 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (83 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_object: /* 0x54 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (84 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_boolean: /* 0x55 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (85 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_byte: /* 0x56 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (86 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_char: /* 0x57 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (87 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_short: /* 0x58 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (88 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput: /* 0x59 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (89 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_wide: /* 0x5a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (90 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_object: /* 0x5b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (91 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_boolean: /* 0x5c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (92 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_byte: /* 0x5d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (93 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_char: /* 0x5e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (94 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_short: /* 0x5f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (95 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sget: /* 0x60 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (96 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sget_wide: /* 0x61 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (97 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sget_object: /* 0x62 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (98 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sget_boolean: /* 0x63 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (99 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sget_byte: /* 0x64 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (100 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sget_char: /* 0x65 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (101 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sget_short: /* 0x66 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (102 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sput: /* 0x67 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (103 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sput_wide: /* 0x68 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (104 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sput_object: /* 0x69 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (105 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sput_boolean: /* 0x6a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (106 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sput_byte: /* 0x6b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (107 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sput_char: /* 0x6c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (108 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sput_short: /* 0x6d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (109 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_virtual: /* 0x6e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (110 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_super: /* 0x6f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (111 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_direct: /* 0x70 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (112 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_static: /* 0x71 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (113 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_interface: /* 0x72 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (114 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_return_void_no_barrier: /* 0x73 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (115 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_virtual_range: /* 0x74 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (116 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_super_range: /* 0x75 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (117 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_direct_range: /* 0x76 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (118 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_static_range: /* 0x77 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (119 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_interface_range: /* 0x78 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (120 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_79: /* 0x79 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (121 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_7a: /* 0x7a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (122 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_neg_int: /* 0x7b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (123 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_not_int: /* 0x7c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (124 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_neg_long: /* 0x7d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (125 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_not_long: /* 0x7e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (126 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_neg_float: /* 0x7f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (127 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_neg_double: /* 0x80 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (128 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_int_to_long: /* 0x81 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (129 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_int_to_float: /* 0x82 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (130 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_int_to_double: /* 0x83 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (131 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_long_to_int: /* 0x84 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (132 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_long_to_float: /* 0x85 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (133 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_long_to_double: /* 0x86 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (134 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_float_to_int: /* 0x87 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (135 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_float_to_long: /* 0x88 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (136 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_float_to_double: /* 0x89 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (137 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_double_to_int: /* 0x8a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (138 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_double_to_long: /* 0x8b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (139 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_double_to_float: /* 0x8c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (140 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_int_to_byte: /* 0x8d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (141 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_int_to_char: /* 0x8e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (142 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_int_to_short: /* 0x8f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (143 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_int: /* 0x90 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (144 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sub_int: /* 0x91 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (145 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_int: /* 0x92 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (146 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_int: /* 0x93 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (147 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_int: /* 0x94 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (148 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_and_int: /* 0x95 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (149 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_or_int: /* 0x96 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (150 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_xor_int: /* 0x97 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (151 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shl_int: /* 0x98 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (152 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shr_int: /* 0x99 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (153 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_ushr_int: /* 0x9a */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (154 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_long: /* 0x9b */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (155 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sub_long: /* 0x9c */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (156 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_long: /* 0x9d */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (157 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_long: /* 0x9e */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (158 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_long: /* 0x9f */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (159 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_and_long: /* 0xa0 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (160 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_or_long: /* 0xa1 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (161 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_xor_long: /* 0xa2 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (162 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shl_long: /* 0xa3 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (163 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shr_long: /* 0xa4 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (164 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_ushr_long: /* 0xa5 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (165 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_float: /* 0xa6 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (166 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sub_float: /* 0xa7 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (167 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_float: /* 0xa8 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (168 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_float: /* 0xa9 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (169 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_float: /* 0xaa */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (170 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_double: /* 0xab */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (171 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sub_double: /* 0xac */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (172 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_double: /* 0xad */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (173 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_double: /* 0xae */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (174 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_double: /* 0xaf */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (175 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_int_2addr: /* 0xb0 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (176 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sub_int_2addr: /* 0xb1 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (177 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_int_2addr: /* 0xb2 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (178 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_int_2addr: /* 0xb3 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (179 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_int_2addr: /* 0xb4 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (180 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_and_int_2addr: /* 0xb5 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (181 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_or_int_2addr: /* 0xb6 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (182 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_xor_int_2addr: /* 0xb7 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (183 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shl_int_2addr: /* 0xb8 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (184 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shr_int_2addr: /* 0xb9 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (185 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_ushr_int_2addr: /* 0xba */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (186 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_long_2addr: /* 0xbb */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (187 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sub_long_2addr: /* 0xbc */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (188 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_long_2addr: /* 0xbd */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (189 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_long_2addr: /* 0xbe */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (190 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_long_2addr: /* 0xbf */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (191 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_and_long_2addr: /* 0xc0 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (192 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_or_long_2addr: /* 0xc1 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (193 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_xor_long_2addr: /* 0xc2 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (194 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shl_long_2addr: /* 0xc3 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (195 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shr_long_2addr: /* 0xc4 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (196 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_ushr_long_2addr: /* 0xc5 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (197 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_float_2addr: /* 0xc6 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (198 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sub_float_2addr: /* 0xc7 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (199 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_float_2addr: /* 0xc8 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (200 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_float_2addr: /* 0xc9 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (201 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_float_2addr: /* 0xca */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (202 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_double_2addr: /* 0xcb */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (203 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_sub_double_2addr: /* 0xcc */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (204 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_double_2addr: /* 0xcd */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (205 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_double_2addr: /* 0xce */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (206 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_double_2addr: /* 0xcf */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (207 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_int_lit16: /* 0xd0 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (208 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rsub_int: /* 0xd1 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (209 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_int_lit16: /* 0xd2 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (210 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_int_lit16: /* 0xd3 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (211 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_int_lit16: /* 0xd4 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (212 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_and_int_lit16: /* 0xd5 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (213 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_or_int_lit16: /* 0xd6 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (214 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_xor_int_lit16: /* 0xd7 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (215 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_add_int_lit8: /* 0xd8 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (216 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rsub_int_lit8: /* 0xd9 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (217 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_mul_int_lit8: /* 0xda */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (218 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_div_int_lit8: /* 0xdb */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (219 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_rem_int_lit8: /* 0xdc */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (220 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_and_int_lit8: /* 0xdd */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (221 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_or_int_lit8: /* 0xde */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (222 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_xor_int_lit8: /* 0xdf */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (223 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shl_int_lit8: /* 0xe0 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (224 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_shr_int_lit8: /* 0xe1 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (225 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_ushr_int_lit8: /* 0xe2 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (226 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_quick: /* 0xe3 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (227 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_wide_quick: /* 0xe4 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (228 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_object_quick: /* 0xe5 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (229 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_quick: /* 0xe6 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (230 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_wide_quick: /* 0xe7 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (231 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_object_quick: /* 0xe8 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (232 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_virtual_quick: /* 0xe9 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (233 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_virtual_range_quick: /* 0xea */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (234 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_boolean_quick: /* 0xeb */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (235 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_byte_quick: /* 0xec */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (236 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_char_quick: /* 0xed */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (237 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iput_short_quick: /* 0xee */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (238 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_boolean_quick: /* 0xef */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (239 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_byte_quick: /* 0xf0 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (240 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_char_quick: /* 0xf1 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (241 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_iget_short_quick: /* 0xf2 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (242 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_invoke_lambda: /* 0xf3 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (243 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_f4: /* 0xf4 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (244 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_capture_variable: /* 0xf5 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (245 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_create_lambda: /* 0xf6 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (246 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_liberate_variable: /* 0xf7 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (247 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_box_lambda: /* 0xf8 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (248 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unbox_lambda: /* 0xf9 */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (249 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_fa: /* 0xfa */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (250 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_fb: /* 0xfb */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (251 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_fc: /* 0xfc */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (252 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_fd: /* 0xfd */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (253 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_fe: /* 0xfe */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (254 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. 
+ +/* ------------------------------ */ + .balign 128 +.L_ALT_op_unused_ff: /* 0xff */ +/* File: mips64/alt_stub.S */ +/* + * Inter-instruction transfer stub. Call out to MterpCheckBefore to handle + * any interesting requests and then jump to the real instruction + * handler. Note that the call to MterpCheckBefore is done as a tail call. + */ + .extern MterpCheckBefore + EXPORT_PC + REFRESH_IBASE + dla ra, artMterpAsmInstructionStart + dla t9, MterpCheckBefore + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + daddu ra, ra, (255 * 128) # Addr of primary handler. + jalr zero, t9 # (self, shadow_frame) Note: tail call. + + .balign 128 + .size artMterpAsmAltInstructionStart, .-artMterpAsmAltInstructionStart + .global artMterpAsmAltInstructionEnd +artMterpAsmAltInstructionEnd: +/* File: mips64/footer.S */ +/* + * We've detected a condition that will result in an exception, but the exception + * has not yet been thrown. Just bail out to the reference interpreter to deal with it. + * TUNING: for consistency, we may want to just go ahead and handle these here. + */ + + .extern MterpLogDivideByZeroException +common_errDivideByZero: + EXPORT_PC +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpLogDivideByZeroException +#endif + b MterpCommonFallback + + .extern MterpLogArrayIndexException +common_errArrayIndex: + EXPORT_PC +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpLogArrayIndexException +#endif + b MterpCommonFallback + + .extern MterpLogNullObjectException +common_errNullObject: + EXPORT_PC +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpLogNullObjectException +#endif + b MterpCommonFallback + +/* + * If we're here, something is out of the ordinary. If there is a pending + * exception, handle it. Otherwise, roll back and retry with the reference + * interpreter. 
+ */ +MterpPossibleException: + ld a0, THREAD_EXCEPTION_OFFSET(rSELF) + beqzc a0, MterpFallback # If not, fall back to reference interpreter. + /* intentional fallthrough - handle pending exception. */ +/* + * On return from a runtime helper routine, we've found a pending exception. + * Can we handle it here - or need to bail out to caller? + * + */ + .extern MterpHandleException + .extern MterpShouldSwitchInterpreters +MterpException: + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpHandleException # (self, shadow_frame) + beqzc v0, MterpExceptionReturn # no local catch, back to caller. + ld a0, OFF_FP_CODE_ITEM(rFP) + lwu a1, OFF_FP_DEX_PC(rFP) + REFRESH_IBASE + daddu rPC, a0, CODEITEM_INSNS_OFFSET + dlsa rPC, a1, rPC, 1 # generate new dex_pc_ptr + /* Do we need to switch interpreters? */ + jal MterpShouldSwitchInterpreters + bnezc v0, MterpFallback + /* resume execution at catch block */ + EXPORT_PC + FETCH_INST + GET_INST_OPCODE v0 + GOTO_OPCODE v0 + /* NOTE: no fallthrough */ + +/* + * Check for suspend check request. Assumes rINST already loaded, rPC advanced and + * still needs to get the opcode and branch to it, and flags are in ra. + */ + .extern MterpSuspendCheck +MterpCheckSuspendAndContinue: + REFRESH_IBASE + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + bnez ra, check1 + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction +check1: + EXPORT_PC + move a0, rSELF + jal MterpSuspendCheck # (self) + bnezc v0, MterpFallback # Something in the environment changed, switch interpreters + GET_INST_OPCODE v0 # extract opcode from rINST + GOTO_OPCODE v0 # jump to next instruction + +/* + * On-stack replacement has happened, and now we've returned from the compiled method. 
+ */ +MterpOnStackReplacement: +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + move a2, rINST # rINST contains offset + jal MterpLogOSR +#endif + li v0, 1 # Signal normal return + b MterpDone + +/* + * Bail out to reference interpreter. + */ + .extern MterpLogFallback +MterpFallback: + EXPORT_PC +#if MTERP_LOGGING + move a0, rSELF + daddu a1, rFP, OFF_FP_SHADOWFRAME + jal MterpLogFallback +#endif +MterpCommonFallback: + li v0, 0 # signal retry with reference interpreter. + b MterpDone + +/* + * We pushed some registers on the stack in ExecuteMterpImpl, then saved + * SP and RA. Here we restore SP, restore the registers, and then restore + * RA to PC. + * + * On entry: + * uint32_t* rFP (should still be live, pointer to base of vregs) + */ +MterpExceptionReturn: + li v0, 1 # signal return to caller. + b MterpDone +/* + * Returned value is expected in a0 and if it's not 64-bit, the 32 most + * significant bits of a0 must be 0. + */ +MterpReturn: + ld a2, OFF_FP_RESULT_REGISTER(rFP) + lw ra, THREAD_FLAGS_OFFSET(rSELF) + sd a0, 0(a2) + move a0, rSELF + and ra, ra, (THREAD_SUSPEND_REQUEST | THREAD_CHECKPOINT_REQUEST) + beqzc ra, check2 + jal MterpSuspendCheck # (self) +check2: + li v0, 1 # signal return to caller. 
+MterpDone: + ld s5, STACK_OFFSET_S5(sp) + .cfi_restore 21 + ld s4, STACK_OFFSET_S4(sp) + .cfi_restore 20 + ld s3, STACK_OFFSET_S3(sp) + .cfi_restore 19 + ld s2, STACK_OFFSET_S2(sp) + .cfi_restore 18 + ld s1, STACK_OFFSET_S1(sp) + .cfi_restore 17 + ld s0, STACK_OFFSET_S0(sp) + .cfi_restore 16 + + ld ra, STACK_OFFSET_RA(sp) + .cfi_restore 31 + + ld t8, STACK_OFFSET_GP(sp) + .cpreturn + .cfi_restore 28 + + .set noreorder + jr ra + daddu sp, sp, STACK_SIZE + .cfi_adjust_cfa_offset -STACK_SIZE + + .cfi_endproc + .size ExecuteMterpImpl, .-ExecuteMterpImpl + diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S index 53fa50fc23..a1360e0934 100644 --- a/runtime/interpreter/mterp/out/mterp_x86_64.S +++ b/runtime/interpreter/mterp/out/mterp_x86_64.S @@ -169,13 +169,23 @@ unspecified registers or condition codes. #define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET) #define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET) +#define MTERP_PROFILE_BRANCHES 1 +#define MTERP_LOGGING 0 + /* - * - * The reference interpreter performs explicit suspect checks, which is somewhat wasteful. - * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually - * mterp should do so as well. + * Profile branch. rINST should contain the offset. %eax is scratch. */ -#define MTERP_SUSPEND 0 +.macro MTERP_PROFILE_BRANCH +#ifdef MTERP_PROFILE_BRANCHES + EXPORT_PC + movq rSELF, OUT_ARG0 + leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1 + movl rINST, OUT_32_ARG2 + call SYMBOL(MterpProfileBranch) + testb %al, %al + jnz MterpOnStackReplacement +#endif +.endm /* * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must @@ -992,17 +1002,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * double to get a byte offset. 
*/ /* goto +AA */ - movsbq rINSTbl, %rax # rax <- ssssssAA - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + movsbq rINSTbl, rINSTq # rINSTq <- ssssssAA + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 1f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -1: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT /* ------------------------------ */ @@ -1016,17 +1021,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * double to get a byte offset. */ /* goto/16 +AAAA */ - movswq 2(rPC), %rax # rax <- ssssAAAA - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + movswq 2(rPC), rINSTq # rINSTq <- ssssAAAA + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 1f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -1: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT /* ------------------------------ */ @@ -1043,17 +1043,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop * to convert from Dalvik offset to byte offset. 
*/ /* goto/32 +AAAAAAAA */ - movslq 2(rPC), %rax # rax <- AAAAAAAA - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + movslq 2(rPC), rINSTq # rINSTq <- AAAAAAAA + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 1f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -1: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT /* ------------------------------ */ @@ -1074,17 +1069,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + BBBBbbbb*2 GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA call SYMBOL(MterpDoPackedSwitch) - addl %eax, %eax - movslq %eax, %rax - leaq (rPC, %rax), rPC + movslq %eax, rINSTq + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq + leaq (rPC, rINSTq), rPC FETCH_INST - jg 1f -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -1: + jle MterpCheckSuspendAndContinue GOTO_NEXT /* ------------------------------ */ @@ -1106,17 +1096,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + BBBBbbbb*2 GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA call SYMBOL(MterpDoSparseSwitch) - addl %eax, %eax - movslq %eax, %rax - leaq (rPC, %rax), rPC + movslq %eax, rINSTq + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq + leaq (rPC, rINSTq), rPC FETCH_INST - jg 1f -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -1: + jle MterpCheckSuspendAndContinue GOTO_NEXT @@ -1324,20 +1309,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop andb $0xf, %cl # rcx <- A GET_VREG %eax, %rcx # eax <- vA cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB) - movl $2, %eax # assume not taken + movl $2, rINST # assume not taken jne 1f - movswq 2(rPC),%rax # Get signed branch offset + movswq 2(rPC), rINSTq # Get signed branch offset 1: - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + 
MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rax <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1359,20 +1339,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop andb $0xf, %cl # rcx <- A GET_VREG %eax, %rcx # eax <- vA cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB) - movl $2, %eax # assume not taken + movl $2, rINST # assume not taken je 1f - movswq 2(rPC),%rax # Get signed branch offset + movswq 2(rPC), rINSTq # Get signed branch offset 1: - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rax <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1394,20 +1369,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop andb $0xf, %cl # rcx <- A GET_VREG %eax, %rcx # eax <- vA cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB) - movl $2, %eax # assume not taken + movl $2, rINST # assume not taken jge 1f - movswq 2(rPC),%rax # Get signed branch offset + movswq 2(rPC), rINSTq # Get signed branch offset 1: - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rax <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1429,20 +1399,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop andb $0xf, %cl # rcx <- A GET_VREG %eax, %rcx # eax <- vA cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB) - movl $2, %eax # assume not taken + movl $2, rINST # assume not taken 
jl 1f - movswq 2(rPC),%rax # Get signed branch offset + movswq 2(rPC), rINSTq # Get signed branch offset 1: - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rax <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1464,20 +1429,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop andb $0xf, %cl # rcx <- A GET_VREG %eax, %rcx # eax <- vA cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB) - movl $2, %eax # assume not taken + movl $2, rINST # assume not taken jle 1f - movswq 2(rPC),%rax # Get signed branch offset + movswq 2(rPC), rINSTq # Get signed branch offset 1: - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rax <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1499,20 +1459,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop andb $0xf, %cl # rcx <- A GET_VREG %eax, %rcx # eax <- vA cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB) - movl $2, %eax # assume not taken + movl $2, rINST # assume not taken jg 1f - movswq 2(rPC),%rax # Get signed branch offset + movswq 2(rPC), rINSTq # Get signed branch offset 1: - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rax <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1530,20 +1485,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop */ /* if-cmp 
vAA, +BBBB */ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0) - movl $2, %eax # assume branch not taken + movl $2, rINST # assume branch not taken jne 1f - movswq 2(rPC),%rax # fetch signed displacement + movswq 2(rPC), rINSTq # fetch signed displacement 1: - addq %rax, %rax # eax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1561,20 +1511,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop */ /* if-cmp vAA, +BBBB */ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0) - movl $2, %eax # assume branch not taken + movl $2, rINST # assume branch not taken je 1f - movswq 2(rPC),%rax # fetch signed displacement + movswq 2(rPC), rINSTq # fetch signed displacement 1: - addq %rax, %rax # eax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1592,20 +1537,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop */ /* if-cmp vAA, +BBBB */ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0) - movl $2, %eax # assume branch not taken + movl $2, rINST # assume branch not taken jge 1f - movswq 2(rPC),%rax # fetch signed displacement + movswq 2(rPC), rINSTq # fetch signed displacement 1: - addq %rax, %rax # eax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # 
AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1623,20 +1563,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop */ /* if-cmp vAA, +BBBB */ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0) - movl $2, %eax # assume branch not taken + movl $2, rINST # assume branch not taken jl 1f - movswq 2(rPC),%rax # fetch signed displacement + movswq 2(rPC), rINSTq # fetch signed displacement 1: - addq %rax, %rax # eax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1654,20 +1589,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop */ /* if-cmp vAA, +BBBB */ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0) - movl $2, %eax # assume branch not taken + movl $2, rINST # assume branch not taken jle 1f - movswq 2(rPC),%rax # fetch signed displacement + movswq 2(rPC), rINSTq # fetch signed displacement 1: - addq %rax, %rax # eax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -1685,20 +1615,15 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop */ /* if-cmp vAA, +BBBB */ cmpl $0, VREG_ADDRESS(rINSTq) # compare (vA, 0) - movl $2, %eax # assume branch not taken + movl $2, rINST # assume branch not taken jg 1f - movswq 2(rPC),%rax # fetch signed displacement + movswq 2(rPC), rINSTq # fetch signed displacement 1: - addq %rax, %rax # eax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check 
-#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT @@ -2931,7 +2856,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeVirtual) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* * Handle a virtual method call. @@ -2961,7 +2891,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeSuper) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* * Handle a "super" method call. @@ -2991,7 +2926,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeDirect) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* ------------------------------ */ @@ -3014,7 +2954,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeStatic) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT @@ -3038,7 +2983,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeInterface) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* * Handle an interface method call. 
@@ -3080,7 +3030,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeVirtualRange) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* ------------------------------ */ @@ -3103,7 +3058,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeSuperRange) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* ------------------------------ */ @@ -3126,7 +3086,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeDirectRange) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* ------------------------------ */ @@ -3149,7 +3114,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeStaticRange) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* ------------------------------ */ @@ -3172,7 +3142,12 @@ SYMBOL(artMterpAsmInstructionStart) = .L_op_nop call SYMBOL(MterpInvokeInterfaceRange) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* ------------------------------ */ @@ -5811,7 +5786,12 @@ movswl %ax, %eax call SYMBOL(MterpInvokeVirtualQuick) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* ------------------------------ */ @@ -5834,7 +5814,12 @@ 
movswl %ax, %eax call SYMBOL(MterpInvokeVirtualQuickRange) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT /* ------------------------------ */ @@ -11805,7 +11790,6 @@ SYMBOL(artMterpAsmAltInstructionEnd): * has not yet been thrown. Just bail out to the reference interpreter to deal with it. * TUNING: for consistency, we may want to just go ahead and handle these here. */ -#define MTERP_LOGGING 0 common_errDivideByZero: EXPORT_PC #if MTERP_LOGGING @@ -11891,13 +11875,17 @@ MterpException: call SYMBOL(MterpHandleException) testb %al, %al jz MterpExceptionReturn - REFRESH_IBASE movq OFF_FP_CODE_ITEM(rFP), %rax mov OFF_FP_DEX_PC(rFP), %ecx leaq CODEITEM_INSNS_OFFSET(%rax), rPC leaq (rPC, %rcx, 2), rPC movq rPC, OFF_FP_DEX_PC_PTR(rFP) + /* Do we need to switch interpreters? */ + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback /* resume execution at catch block */ + REFRESH_IBASE FETCH_INST GOTO_NEXT /* NOTE: no fallthrough */ @@ -11917,6 +11905,19 @@ MterpCheckSuspendAndContinue: GOTO_NEXT /* + * On-stack replacement has happened, and now we've returned from the compiled method. + */ +MterpOnStackReplacement: +#if MTERP_LOGGING + movq rSELF, OUT_ARG0 + leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1 + movl rINST, OUT_32_ARG2 + call SYMBOL(MterpLogOSR) +#endif + movl $1, %eax + jmp MterpDone + +/* * Bail out to reference interpreter. 
*/ MterpFallback: diff --git a/runtime/interpreter/mterp/rebuild.sh b/runtime/interpreter/mterp/rebuild.sh index e3f043749f..ca3dcd9a13 100755 --- a/runtime/interpreter/mterp/rebuild.sh +++ b/runtime/interpreter/mterp/rebuild.sh @@ -20,5 +20,4 @@ # set -e -# for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done -for arch in arm x86 mips arm64 x86_64 ; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done +for arch in arm x86 mips arm64 x86_64 mips64; do TARGET_ARCH_EXT=$arch make -f Makefile_mterp; done diff --git a/runtime/interpreter/mterp/x86_64/bincmp.S b/runtime/interpreter/mterp/x86_64/bincmp.S index 5e4225fd86..a16050b371 100644 --- a/runtime/interpreter/mterp/x86_64/bincmp.S +++ b/runtime/interpreter/mterp/x86_64/bincmp.S @@ -11,18 +11,13 @@ andb $$0xf, %cl # rcx <- A GET_VREG %eax, %rcx # eax <- vA cmpl VREG_ADDRESS(rINSTq), %eax # compare (vA, vB) - movl $$2, %eax # assume not taken + movl $$2, rINST # assume not taken j${revcmp} 1f - movswq 2(rPC),%rax # Get signed branch offset + movswq 2(rPC), rINSTq # Get signed branch offset 1: - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rax <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S index cb60c0155e..573256b781 100644 --- a/runtime/interpreter/mterp/x86_64/footer.S +++ b/runtime/interpreter/mterp/x86_64/footer.S @@ -12,7 +12,6 @@ * has not yet been thrown. Just bail out to the reference interpreter to deal with it. * TUNING: for consistency, we may want to just go ahead and handle these here. 
*/ -#define MTERP_LOGGING 0 common_errDivideByZero: EXPORT_PC #if MTERP_LOGGING @@ -98,13 +97,17 @@ MterpException: call SYMBOL(MterpHandleException) testb %al, %al jz MterpExceptionReturn - REFRESH_IBASE movq OFF_FP_CODE_ITEM(rFP), %rax mov OFF_FP_DEX_PC(rFP), %ecx leaq CODEITEM_INSNS_OFFSET(%rax), rPC leaq (rPC, %rcx, 2), rPC movq rPC, OFF_FP_DEX_PC_PTR(rFP) + /* Do we need to switch interpreters? */ + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback /* resume execution at catch block */ + REFRESH_IBASE FETCH_INST GOTO_NEXT /* NOTE: no fallthrough */ @@ -124,6 +127,19 @@ MterpCheckSuspendAndContinue: GOTO_NEXT /* + * On-stack replacement has happened, and now we've returned from the compiled method. + */ +MterpOnStackReplacement: +#if MTERP_LOGGING + movq rSELF, OUT_ARG0 + leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1 + movl rINST, OUT_32_ARG2 + call SYMBOL(MterpLogOSR) +#endif + movl $$1, %eax + jmp MterpDone + +/* * Bail out to reference interpreter. */ MterpFallback: diff --git a/runtime/interpreter/mterp/x86_64/header.S b/runtime/interpreter/mterp/x86_64/header.S index dfc7b53de8..eb84ea1eb5 100644 --- a/runtime/interpreter/mterp/x86_64/header.S +++ b/runtime/interpreter/mterp/x86_64/header.S @@ -162,13 +162,23 @@ unspecified registers or condition codes. #define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET) #define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET) +#define MTERP_PROFILE_BRANCHES 1 +#define MTERP_LOGGING 0 + /* - * - * The reference interpreter performs explicit suspect checks, which is somewhat wasteful. - * Dalvik's interpreter folded suspend checks into the jump table mechanism, and eventually - * mterp should do so as well. + * Profile branch. rINST should contain the offset. %eax is scratch. 
*/ -#define MTERP_SUSPEND 0 +.macro MTERP_PROFILE_BRANCH +#ifdef MTERP_PROFILE_BRANCHES + EXPORT_PC + movq rSELF, OUT_ARG0 + leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1 + movl rINST, OUT_32_ARG2 + call SYMBOL(MterpProfileBranch) + testb %al, %al + jnz MterpOnStackReplacement +#endif +.endm /* * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must diff --git a/runtime/interpreter/mterp/x86_64/invoke.S b/runtime/interpreter/mterp/x86_64/invoke.S index 86eccdbf91..f7e6155c16 100644 --- a/runtime/interpreter/mterp/x86_64/invoke.S +++ b/runtime/interpreter/mterp/x86_64/invoke.S @@ -14,4 +14,9 @@ call SYMBOL($helper) testb %al, %al jz MterpException - ADVANCE_PC_FETCH_AND_GOTO_NEXT 3 + ADVANCE_PC 3 + call SYMBOL(MterpShouldSwitchInterpreters) + testb %al, %al + jnz MterpFallback + FETCH_INST + GOTO_NEXT diff --git a/runtime/interpreter/mterp/x86_64/op_goto.S b/runtime/interpreter/mterp/x86_64/op_goto.S index 05a2dda1c0..c4fc97644f 100644 --- a/runtime/interpreter/mterp/x86_64/op_goto.S +++ b/runtime/interpreter/mterp/x86_64/op_goto.S @@ -5,15 +5,10 @@ * double to get a byte offset. */ /* goto +AA */ - movsbq rINSTbl, %rax # rax <- ssssssAA - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + movsbq rINSTbl, rINSTq # rINSTq <- ssssssAA + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 1f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -1: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT diff --git a/runtime/interpreter/mterp/x86_64/op_goto_16.S b/runtime/interpreter/mterp/x86_64/op_goto_16.S index 029749c50a..8cb9a5c50f 100644 --- a/runtime/interpreter/mterp/x86_64/op_goto_16.S +++ b/runtime/interpreter/mterp/x86_64/op_goto_16.S @@ -5,15 +5,10 @@ * double to get a byte offset. 
*/ /* goto/16 +AAAA */ - movswq 2(rPC), %rax # rax <- ssssAAAA - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + movswq 2(rPC), rINSTq # rINSTq <- ssssAAAA + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 1f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -1: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT diff --git a/runtime/interpreter/mterp/x86_64/op_goto_32.S b/runtime/interpreter/mterp/x86_64/op_goto_32.S index 28233108e5..4ecdacd3e6 100644 --- a/runtime/interpreter/mterp/x86_64/op_goto_32.S +++ b/runtime/interpreter/mterp/x86_64/op_goto_32.S @@ -8,15 +8,10 @@ * to convert from Dalvik offset to byte offset. */ /* goto/32 +AAAAAAAA */ - movslq 2(rPC), %rax # rax <- AAAAAAAA - addq %rax, %rax # rax <- AA * 2 - leaq (rPC, %rax), rPC + movslq 2(rPC), rINSTq # rINSTq <- AAAAAAAA + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 1f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -1: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT diff --git a/runtime/interpreter/mterp/x86_64/op_packed_switch.S b/runtime/interpreter/mterp/x86_64/op_packed_switch.S index 0400ca45cf..cb0acb7a72 100644 --- a/runtime/interpreter/mterp/x86_64/op_packed_switch.S +++ b/runtime/interpreter/mterp/x86_64/op_packed_switch.S @@ -13,15 +13,10 @@ leaq (rPC,OUT_ARG0,2), OUT_ARG0 # rcx <- PC + BBBBbbbb*2 GET_VREG OUT_32_ARG1, rINSTq # eax <- vAA call SYMBOL($func) - addl %eax, %eax - movslq %eax, %rax - leaq (rPC, %rax), rPC + movslq %eax, rINSTq + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq + leaq (rPC, rINSTq), rPC FETCH_INST - jg 1f -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -1: + jle MterpCheckSuspendAndContinue GOTO_NEXT diff --git 
a/runtime/interpreter/mterp/x86_64/zcmp.S b/runtime/interpreter/mterp/x86_64/zcmp.S index e503ec1f38..0051407cad 100644 --- a/runtime/interpreter/mterp/x86_64/zcmp.S +++ b/runtime/interpreter/mterp/x86_64/zcmp.S @@ -7,18 +7,13 @@ */ /* if-cmp vAA, +BBBB */ cmpl $$0, VREG_ADDRESS(rINSTq) # compare (vA, 0) - movl $$2, %eax # assume branch not taken + movl $$2, rINST # assume branch not taken j${revcmp} 1f - movswq 2(rPC),%rax # fetch signed displacement + movswq 2(rPC), rINSTq # fetch signed displacement 1: - addq %rax, %rax # eax <- AA * 2 - leaq (rPC, %rax), rPC + MTERP_PROFILE_BRANCH + addq rINSTq, rINSTq # rINSTq <- AA * 2 + leaq (rPC, rINSTq), rPC FETCH_INST - jg 2f # AA * 2 > 0 => no suspend check -#if MTERP_SUSPEND - REFRESH_IBASE -#else - jmp MterpCheckSuspendAndContinue -#endif -2: + jle MterpCheckSuspendAndContinue # AA * 2 <= 0 => suspend check GOTO_NEXT diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc index 5c4419333b..a41fd45041 100644 --- a/runtime/java_vm_ext.cc +++ b/runtime/java_vm_ext.cc @@ -16,15 +16,13 @@ #include "jni_internal.h" -#define ATRACE_TAG ATRACE_TAG_DALVIK - -#include <cutils/trace.h> #include <dlfcn.h> #include "art_method.h" #include "base/dumpable.h" #include "base/mutex.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "check_jni.h" #include "dex_file-inl.h" #include "fault_handler.h" @@ -716,9 +714,11 @@ void JavaVMExt::UnloadNativeLibraries() { libraries_.get()->UnloadNativeLibraries(); } -bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader, - bool is_shared_namespace, jstring library_path, - jstring permitted_path, std::string* error_msg) { +bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, + const std::string& path, + jobject class_loader, + jstring library_path, + std::string* error_msg) { error_msg->clear(); // See if we've already loaded this library. 
If we have, and the class loader @@ -777,9 +777,12 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject Locks::mutator_lock_->AssertNotHeld(self); const char* path_str = path.empty() ? nullptr : path.c_str(); - void* handle = android::OpenNativeLibrary(env, runtime_->GetTargetSdkVersion(), path_str, - class_loader, is_shared_namespace, library_path, - permitted_path); + void* handle = android::OpenNativeLibrary(env, + runtime_->GetTargetSdkVersion(), + path_str, + class_loader, + library_path); + bool needs_native_bridge = false; if (handle == nullptr) { if (android::NativeBridgeIsSupported(path_str)) { @@ -924,11 +927,10 @@ void JavaVMExt::VisitRoots(RootVisitor* visitor) { // JNI Invocation interface. extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) { - ATRACE_BEGIN(__FUNCTION__); + ScopedTrace trace(__FUNCTION__); const JavaVMInitArgs* args = static_cast<JavaVMInitArgs*>(vm_args); if (IsBadJniVersion(args->version)) { LOG(ERROR) << "Bad JNI version passed to CreateJavaVM: " << args->version; - ATRACE_END(); return JNI_EVERSION; } RuntimeOptions options; @@ -938,7 +940,6 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) { } bool ignore_unrecognized = args->ignoreUnrecognized; if (!Runtime::Create(options, ignore_unrecognized)) { - ATRACE_END(); return JNI_ERR; } Runtime* runtime = Runtime::Current(); @@ -947,12 +948,10 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) { delete Thread::Current()->GetJniEnv(); delete runtime->GetJavaVM(); LOG(WARNING) << "CreateJavaVM failed"; - ATRACE_END(); return JNI_ERR; } *p_env = Thread::Current()->GetJniEnv(); *p_vm = runtime->GetJavaVM(); - ATRACE_END(); return JNI_OK; } diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h index 8cae1e52d2..3d055cd7ce 100644 --- a/runtime/java_vm_ext.h +++ b/runtime/java_vm_ext.h @@ -85,8 +85,10 @@ class JavaVMExt : public JavaVM { * Returns 'true' on success. 
On failure, sets 'error_msg' to a * human-readable description of the error. */ - bool LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader, - bool is_shared_namespace, jstring library_path, jstring permitted_path, + bool LoadNativeLibrary(JNIEnv* env, + const std::string& path, + jobject class_loader, + jstring library_path, std::string* error_msg); // Unload native libraries with cleared class loaders. diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index 4623a4a3a2..5bd9a6b76b 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -41,30 +41,47 @@ static constexpr bool kEnableOnStackReplacement = true; JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) { auto* jit_options = new JitOptions; jit_options->use_jit_ = options.GetOrDefault(RuntimeArgumentMap::UseJIT); + jit_options->code_cache_initial_capacity_ = options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity); jit_options->code_cache_max_capacity_ = options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity); - jit_options->compile_threshold_ = - options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold); - // TODO(ngeoffray): Make this a proper option. 
- jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2; - jit_options->warmup_threshold_ = - options.GetOrDefault(RuntimeArgumentMap::JITWarmupThreshold); jit_options->dump_info_on_shutdown_ = options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown); jit_options->save_profiling_info_ = - options.GetOrDefault(RuntimeArgumentMap::JITSaveProfilingInfo);; + options.GetOrDefault(RuntimeArgumentMap::JITSaveProfilingInfo); + + jit_options->compile_threshold_ = options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold); + if (jit_options->compile_threshold_ > std::numeric_limits<uint16_t>::max()) { + LOG(FATAL) << "Method compilation threshold is above its internal limit."; + } + + if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) { + jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold); + if (jit_options->warmup_threshold_ > std::numeric_limits<uint16_t>::max()) { + LOG(FATAL) << "Method warmup threshold is above its internal limit."; + } + } else { + jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2; + } + + if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) { + jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold); + if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) { + LOG(FATAL) << "Method on stack replacement threshold is above its internal limit."; + } + } else { + jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2; + if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) { + jit_options->osr_threshold_ = std::numeric_limits<uint16_t>::max(); + } + } + return jit_options; } void Jit::DumpInfo(std::ostream& os) { - os << "JIT code cache size=" << PrettySize(code_cache_->CodeCacheSize()) << "\n" - << "JIT data cache size=" << PrettySize(code_cache_->DataCacheSize()) << "\n" - << "JIT current capacity=" << PrettySize(code_cache_->GetCurrentCapacity()) << "\n" - << "JIT number of compiled code=" << 
code_cache_->NumberOfCompiledCode() << "\n" - << "JIT total number of compilations=" << code_cache_->NumberOfCompilations() << "\n" - << "JIT total number of osr compilations=" << code_cache_->NumberOfOsrCompilations() << "\n"; + code_cache_->Dump(os); cumulative_timings_.Dump(os); } @@ -97,7 +114,7 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) { return nullptr; } jit->save_profiling_info_ = options->GetSaveProfilingInfo(); - LOG(INFO) << "JIT created with initial_capacity=" + VLOG(jit) << "JIT created with initial_capacity=" << PrettySize(options->GetCodeCacheInitialCapacity()) << ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity()) << ", compile_threshold=" << options->GetCompileThreshold() @@ -174,7 +191,6 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) { // of that proxy method, as the compiler does not expect a proxy method. ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(sizeof(void*)); if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) { - VLOG(jit) << "JIT not compiling " << PrettyMethod(method) << " due to code cache"; return false; } bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr); @@ -215,6 +231,10 @@ bool Jit::JitAtFirstUse() { return false; } +bool Jit::CanInvokeCompiledCode(ArtMethod* method) { + return code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode()); +} + Jit::~Jit() { DCHECK(!save_profiling_info_ || !ProfileSaver::IsStarted()); if (dump_info_on_shutdown_) { diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index 570f683598..d5c213416a 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -43,8 +43,7 @@ class JitOptions; class Jit { public: static constexpr bool kStressMode = kIsDebugBuild; - static constexpr size_t kDefaultCompileThreshold = kStressMode ? 
2 : 500; - static constexpr size_t kDefaultWarmupThreshold = kDefaultCompileThreshold / 2; + static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 10000; virtual ~Jit(); static Jit* Create(JitOptions* options, std::string* error_msg); @@ -94,8 +93,12 @@ class Jit { // into the specified class linker to the jit debug interface, void DumpTypeInfoForLoadedTypes(ClassLinker* linker); + // Return whether we should try to JIT compiled code as soon as an ArtMethod is invoked. bool JitAtFirstUse(); + // Return whether we can invoke JIT code for `method`. + bool CanInvokeCompiledCode(ArtMethod* method); + // If an OSR compiled version is available for `method`, // and `dex_pc + dex_pc_offset` is an entry point of that compiled // version, this method will jump to the compiled code, let it run, diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index 8858b486f9..af47da63c4 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -20,10 +20,12 @@ #include "art_method-inl.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "debugger_interface.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "gc/accounting/bitmap-inl.h" +#include "jit/jit.h" #include "jit/profiling_info.h" #include "linear_alloc.h" #include "mem_map.h" @@ -51,6 +53,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity, size_t max_capacity, bool generate_debug_info, std::string* error_msg) { + ScopedTrace trace(__PRETTY_FUNCTION__); CHECK_GE(max_capacity, initial_capacity); // Generating debug information is mostly for using the 'perf' tool, which does @@ -123,13 +126,15 @@ JitCodeCache::JitCodeCache(MemMap* code_map, current_capacity_(initial_code_capacity + initial_data_capacity), code_end_(initial_code_capacity), data_end_(initial_data_capacity), - has_done_full_collection_(false), + last_collection_increased_code_cache_(false), last_update_time_ns_(0), 
garbage_collect_code_(garbage_collect_code), used_memory_for_data_(0), used_memory_for_code_(0), number_of_compilations_(0), - number_of_osr_compilations_(0) { + number_of_osr_compilations_(0), + number_of_deoptimizations_(0), + number_of_collections_(0) { DCHECK_GE(max_capacity, initial_code_capacity + initial_data_capacity); code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_end_, false /*locked*/); @@ -164,12 +169,16 @@ bool JitCodeCache::ContainsMethod(ArtMethod* method) { return false; } -class ScopedCodeCacheWrite { +class ScopedCodeCacheWrite : ScopedTrace { public: - explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) { + explicit ScopedCodeCacheWrite(MemMap* code_map) + : ScopedTrace("ScopedCodeCacheWrite"), + code_map_(code_map) { + ScopedTrace trace("mprotect all"); CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll); } ~ScopedCodeCacheWrite() { + ScopedTrace trace("mprotect code"); CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode); } private: @@ -252,6 +261,7 @@ void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UN } void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) { + ScopedTrace trace(__PRETTY_FUNCTION__); MutexLock mu(self, lock_); // We do not check if a code cache GC is in progress, as this method comes // with the classlinker_classes_lock_ held, and suspending ourselves could @@ -287,6 +297,15 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) { } } +void JitCodeCache::ClearGcRootsInInlineCaches(Thread* self) { + MutexLock mu(self, lock_); + for (ProfilingInfo* info : profiling_infos_) { + if (!info->IsInUseByCompiler()) { + info->ClearGcRootsInInlineCaches(); + } + } +} + uint8_t* JitCodeCache::CommitCodeInternal(Thread* self, ArtMethod* method, const uint8_t* mapping_table, @@ -363,16 +382,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self, return reinterpret_cast<uint8_t*>(method_header); } 
-size_t JitCodeCache::NumberOfCompilations() { - MutexLock mu(Thread::Current(), lock_); - return number_of_compilations_; -} - -size_t JitCodeCache::NumberOfOsrCompilations() { - MutexLock mu(Thread::Current(), lock_); - return number_of_osr_compilations_; -} - size_t JitCodeCache::CodeCacheSize() { MutexLock mu(Thread::Current(), lock_); return CodeCacheSizeLocked(); @@ -391,11 +400,6 @@ size_t JitCodeCache::DataCacheSizeLocked() { return used_memory_for_data_; } -size_t JitCodeCache::NumberOfCompiledCode() { - MutexLock mu(Thread::Current(), lock_); - return method_code_map_.size(); -} - void JitCodeCache::ClearData(Thread* self, void* data) { MutexLock mu(self, lock_); FreeData(reinterpret_cast<uint8_t*>(data)); @@ -464,6 +468,7 @@ class MarkCodeClosure FINAL : public Closure { : code_cache_(code_cache), barrier_(barrier) {} void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + ScopedTrace trace(__PRETTY_FUNCTION__); DCHECK(thread == Thread::Current() || thread->IsSuspended()); MarkCodeVisitor visitor(thread, code_cache_); visitor.WalkStack(); @@ -546,38 +551,25 @@ void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) { } } -void JitCodeCache::RemoveUnusedCode(Thread* self) { - // Clear the osr map, chances are most of the code in it is now dead. - { - MutexLock mu(self, lock_); - osr_code_map_.clear(); - } - - // Run a checkpoint on all threads to mark the JIT compiled code they are running. - MarkCompiledCodeOnThreadStacks(self); - - // Iterate over all compiled code and remove entries that are not marked and not - // the entrypoint of their corresponding ArtMethod. 
- { - MutexLock mu(self, lock_); - ScopedCodeCacheWrite scc(code_map_.get()); - for (auto it = method_code_map_.begin(); it != method_code_map_.end();) { - const void* code_ptr = it->first; - ArtMethod* method = it->second; - uintptr_t allocation = FromCodeToAllocation(code_ptr); - const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); - if ((method->GetEntryPointFromQuickCompiledCode() != method_header->GetEntryPoint()) && - !GetLiveBitmap()->Test(allocation)) { - FreeCode(code_ptr, method); - it = method_code_map_.erase(it); - } else { - ++it; - } - } +bool JitCodeCache::ShouldDoFullCollection() { + if (current_capacity_ == max_capacity_) { + // Always do a full collection when the code cache is full. + return true; + } else if (current_capacity_ < kReservedCapacity) { + // Always do partial collection when the code cache size is below the reserved + // capacity. + return false; + } else if (last_collection_increased_code_cache_) { + // This time do a full collection. + return true; + } else { + // This time do a partial collection. + return false; } } void JitCodeCache::GarbageCollectCache(Thread* self) { + ScopedTrace trace(__FUNCTION__); if (!garbage_collect_code_) { MutexLock mu(self, lock_); IncreaseCodeCacheCapacity(); @@ -591,6 +583,7 @@ void JitCodeCache::GarbageCollectCache(Thread* self) { if (WaitForPotentialCollectionToComplete(self)) { return; } else { + number_of_collections_++; live_bitmap_.reset(CodeCacheBitmap::Create( "code-cache-bitmap", reinterpret_cast<uintptr_t>(code_map_->Begin()), @@ -599,70 +592,124 @@ void JitCodeCache::GarbageCollectCache(Thread* self) { } } - // Check if we want to do a full collection. - bool do_full_collection = true; + TimingLogger logger("JIT code cache timing logger", true, VLOG_IS_ON(jit)); { - MutexLock mu(self, lock_); - if (current_capacity_ == max_capacity_) { - // Always do a full collection when the code cache is full. 
- do_full_collection = true; - } else if (current_capacity_ < kReservedCapacity) { - // Do a partial collection until we hit the reserved capacity limit. - do_full_collection = false; - } else if (has_done_full_collection_) { - // Do a partial collection if we have done a full collection in the last - // collection round. - do_full_collection = false; + TimingLogger::ScopedTiming st("Code cache collection", &logger); + + bool do_full_collection = false; + { + MutexLock mu(self, lock_); + do_full_collection = ShouldDoFullCollection(); } - } - if (!kIsDebugBuild || VLOG_IS_ON(jit)) { - LOG(INFO) << "Do " - << (do_full_collection ? "full" : "partial") - << " code cache collection, code=" - << PrettySize(CodeCacheSize()) - << ", data=" << PrettySize(DataCacheSize()); - } + if (!kIsDebugBuild || VLOG_IS_ON(jit)) { + LOG(INFO) << "Do " + << (do_full_collection ? "full" : "partial") + << " code cache collection, code=" + << PrettySize(CodeCacheSize()) + << ", data=" << PrettySize(DataCacheSize()); + } - if (do_full_collection) { - DoFullCollection(self); - } else { - RemoveUnusedCode(self); - } + DoCollection(self, /* collect_profiling_info */ do_full_collection); - { - MutexLock mu(self, lock_); - if (!do_full_collection) { - has_done_full_collection_ = false; - IncreaseCodeCacheCapacity(); - } else { - has_done_full_collection_ = true; + if (!kIsDebugBuild || VLOG_IS_ON(jit)) { + LOG(INFO) << "After code cache collection, code=" + << PrettySize(CodeCacheSize()) + << ", data=" << PrettySize(DataCacheSize()); + } + + { + MutexLock mu(self, lock_); + + // Increase the code cache only when we do partial collections. + // TODO: base this strategy on how full the code cache is? 
+ if (do_full_collection) { + last_collection_increased_code_cache_ = false; + } else { + last_collection_increased_code_cache_ = true; + IncreaseCodeCacheCapacity(); + } + + bool next_collection_will_be_full = ShouldDoFullCollection(); + + // Start polling the liveness of compiled code to prepare for the next full collection. + // We avoid doing this if exit stubs are installed to not mess with the instrumentation. + // TODO(ngeoffray): Clean up instrumentation and code cache interactions. + if (!Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() && + next_collection_will_be_full) { + // Save the entry point of methods we have compiled, and update the entry + // point of those methods to the interpreter. If the method is invoked, the + // interpreter will update its entry point to the compiled code and call it. + for (ProfilingInfo* info : profiling_infos_) { + const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode(); + if (ContainsPc(entry_point)) { + info->SetSavedEntryPoint(entry_point); + info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge()); + } + } + + DCHECK(CheckLiveCompiledCodeHasProfilingInfo()); + } + live_bitmap_.reset(nullptr); + NotifyCollectionDone(self); } - live_bitmap_.reset(nullptr); - NotifyCollectionDone(self); } + Runtime::Current()->GetJit()->AddTimingLogger(logger); +} - if (!kIsDebugBuild || VLOG_IS_ON(jit)) { - LOG(INFO) << "After code cache collection, code=" - << PrettySize(CodeCacheSize()) - << ", data=" << PrettySize(DataCacheSize()); +void JitCodeCache::RemoveUnmarkedCode(Thread* self) { + ScopedTrace trace(__FUNCTION__); + MutexLock mu(self, lock_); + ScopedCodeCacheWrite scc(code_map_.get()); + // Iterate over all compiled code and remove entries that are not marked. 
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) { + const void* code_ptr = it->first; + ArtMethod* method = it->second; + uintptr_t allocation = FromCodeToAllocation(code_ptr); + if (GetLiveBitmap()->Test(allocation)) { + ++it; + } else { + const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); + if (method_header->GetEntryPoint() == GetQuickToInterpreterBridge()) { + method->ClearCounter(); + } + FreeCode(code_ptr, method); + it = method_code_map_.erase(it); + } } } -void JitCodeCache::DoFullCollection(Thread* self) { - instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); +void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) { + ScopedTrace trace(__FUNCTION__); { MutexLock mu(self, lock_); - // Walk over all compiled methods and set the entry points of these - // methods to interpreter. - for (auto& it : method_code_map_) { - instrumentation->UpdateMethodsCode(it.second, GetQuickToInterpreterBridge()); + if (collect_profiling_info) { + // Clear the profiling info of methods that do not have compiled code as entrypoint. + // Also remove the saved entry point from the ProfilingInfo objects. + for (ProfilingInfo* info : profiling_infos_) { + const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode(); + if (!ContainsPc(ptr) && !info->IsInUseByCompiler()) { + info->GetMethod()->SetProfilingInfo(nullptr); + } + info->SetSavedEntryPoint(nullptr); + } + } else if (kIsDebugBuild) { + // Sanity check that the profiling infos do not have a dangling entry point. + for (ProfilingInfo* info : profiling_infos_) { + DCHECK(info->GetSavedEntryPoint() == nullptr); + } } - // Clear the profiling info of methods that are not being compiled. - for (ProfilingInfo* info : profiling_infos_) { - if (!info->IsMethodBeingCompiled()) { - info->GetMethod()->SetProfilingInfo(nullptr); + // Mark compiled code that are entrypoints of ArtMethods. 
Compiled code that is not + // an entry point is either: + // - an osr compiled code, that will be removed if not in a thread call stack. + // - discarded compiled code, that will be removed if not in a thread call stack. + for (const auto& it : method_code_map_) { + ArtMethod* method = it.second; + const void* code_ptr = it.first; + const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); + if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) { + GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr)); } } @@ -674,41 +721,60 @@ void JitCodeCache::DoFullCollection(Thread* self) { // Run a checkpoint on all threads to mark the JIT compiled code they are running. MarkCompiledCodeOnThreadStacks(self); - { - MutexLock mu(self, lock_); - // Free unused compiled code, and restore the entry point of used compiled code. - { - ScopedCodeCacheWrite scc(code_map_.get()); - for (auto it = method_code_map_.begin(); it != method_code_map_.end();) { - const void* code_ptr = it->first; - ArtMethod* method = it->second; - uintptr_t allocation = FromCodeToAllocation(code_ptr); - const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); - if (GetLiveBitmap()->Test(allocation)) { - instrumentation->UpdateMethodsCode(method, method_header->GetEntryPoint()); - ++it; - } else { - method->ClearCounter(); - DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint()); - FreeCode(code_ptr, method); - it = method_code_map_.erase(it); - } - } - } + // At this point, mutator threads are still running, and entrypoints of methods can + // change. We do know they cannot change to a code cache entry that is not marked, + // therefore we can safely remove those entries. + RemoveUnmarkedCode(self); - // Free all profiling infos of methods that were not being compiled. 
+ if (collect_profiling_info) { + MutexLock mu(self, lock_); + // Free all profiling infos of methods not compiled nor being compiled. auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(), [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS { - if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) { + const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode(); + // We have previously cleared the ProfilingInfo pointer in the ArtMethod in the hope + // that the compiled code would not get revived. As mutator threads run concurrently, + // they may have revived the compiled code, and now we are in the situation where + // a method has compiled code but no ProfilingInfo. + // We make sure compiled methods have a ProfilingInfo object. It is needed for + // code cache collection. + if (ContainsPc(ptr) && info->GetMethod()->GetProfilingInfo(sizeof(void*)) == nullptr) { + // We clear the inline caches as classes in it might be stalled. + info->ClearGcRootsInInlineCaches(); + // Do a fence to make sure the clearing is seen before attaching to the method. + QuasiAtomic::ThreadFenceRelease(); + info->GetMethod()->SetProfilingInfo(info); + } else if (info->GetMethod()->GetProfilingInfo(sizeof(void*)) != info) { + // No need for this ProfilingInfo object anymore. FreeData(reinterpret_cast<uint8_t*>(info)); return true; } return false; }); profiling_infos_.erase(profiling_kept_end, profiling_infos_.end()); + DCHECK(CheckLiveCompiledCodeHasProfilingInfo()); } } +bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() { + ScopedTrace trace(__FUNCTION__); + // Check that methods we have compiled do have a ProfilingInfo object. We would + // have memory leaks of compiled code otherwise. 
+ for (const auto& it : method_code_map_) { + ArtMethod* method = it.second; + if (method->GetProfilingInfo(sizeof(void*)) == nullptr) { + const void* code_ptr = it.first; + const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); + if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) { + // If the code is not dead, then we have a problem. Note that this can even + // happen just after a collection, as mutator threads are running in parallel + // and could deoptimize an existing compiled code. + return false; + } + } + } + return true; +} OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) { static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA"); @@ -751,23 +817,38 @@ OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) { ProfilingInfo* JitCodeCache::AddProfilingInfo(Thread* self, ArtMethod* method, const std::vector<uint32_t>& entries, - bool retry_allocation) { - ProfilingInfo* info = AddProfilingInfoInternal(self, method, entries); + bool retry_allocation) + // No thread safety analysis as we are using TryLock/Unlock explicitly. + NO_THREAD_SAFETY_ANALYSIS { + ProfilingInfo* info = nullptr; + if (!retry_allocation) { + // If we are allocating for the interpreter, just try to lock, to avoid + // lock contention with the JIT. 
+ if (lock_.ExclusiveTryLock(self)) { + info = AddProfilingInfoInternal(self, method, entries); + lock_.ExclusiveUnlock(self); + } + } else { + { + MutexLock mu(self, lock_); + info = AddProfilingInfoInternal(self, method, entries); + } - if (info == nullptr && retry_allocation) { - GarbageCollectCache(self); - info = AddProfilingInfoInternal(self, method, entries); + if (info == nullptr) { + GarbageCollectCache(self); + MutexLock mu(self, lock_); + info = AddProfilingInfoInternal(self, method, entries); + } } return info; } -ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self, +ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self ATTRIBUTE_UNUSED, ArtMethod* method, const std::vector<uint32_t>& entries) { size_t profile_info_size = RoundUp( sizeof(ProfilingInfo) + sizeof(InlineCache) * entries.size(), sizeof(void*)); - MutexLock mu(self, lock_); // Check whether some other thread has concurrently created it. ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*)); @@ -807,6 +888,7 @@ void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_S void JitCodeCache::GetCompiledArtMethods(const std::set<std::string>& dex_base_locations, std::vector<ArtMethod*>& methods) { + ScopedTrace trace(__FUNCTION__); MutexLock mu(Thread::Current(), lock_); for (auto it : method_code_map_) { if (ContainsElement(dex_base_locations, it.second->GetDexFile()->GetBaseLocation())) { @@ -821,21 +903,47 @@ uint64_t JitCodeCache::GetLastUpdateTimeNs() const { bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) { if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) { + VLOG(jit) << PrettyMethod(method) << " is already compiled"; return false; } MutexLock mu(self, lock_); if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) { + VLOG(jit) << PrettyMethod(method) << " is already osr compiled"; return false; } + ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*)); - if 
(info == nullptr || info->IsMethodBeingCompiled()) { + if (info == nullptr) { + VLOG(jit) << PrettyMethod(method) << " needs a ProfilingInfo to be compiled"; return false; } + + if (info->IsMethodBeingCompiled()) { + VLOG(jit) << PrettyMethod(method) << " is already being compiled"; + return false; + } + info->SetIsMethodBeingCompiled(true); return true; } +ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) { + MutexLock mu(self, lock_); + ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*)); + if (info != nullptr) { + info->IncrementInlineUse(); + } + return info; +} + +void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) { + MutexLock mu(self, lock_); + ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*)); + DCHECK(info != nullptr); + info->DecrementInlineUse(); +} + void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self ATTRIBUTE_UNUSED) { ProfilingInfo* info = method->GetProfilingInfo(sizeof(void*)); DCHECK(info->IsMethodBeingCompiled()); @@ -849,6 +957,13 @@ size_t JitCodeCache::GetMemorySizeOfCodePointer(const void* ptr) { void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* header) { + ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*)); + if ((profiling_info != nullptr) && + (profiling_info->GetSavedEntryPoint() == header->GetEntryPoint())) { + // Prevent future uses of the compiled code. 
+ profiling_info->SetSavedEntryPoint(nullptr); + } + if (method->GetEntryPointFromQuickCompiledCode() == header->GetEntryPoint()) { // The entrypoint is the one to invalidate, so we just update // it to the interpreter entry point and clear the counter to get the method @@ -864,6 +979,8 @@ void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method, osr_code_map_.erase(it); } } + MutexLock mu(Thread::Current(), lock_); + number_of_deoptimizations_++; } uint8_t* JitCodeCache::AllocateCode(size_t code_size) { @@ -893,5 +1010,18 @@ void JitCodeCache::FreeData(uint8_t* data) { mspace_free(data_mspace_, data); } +void JitCodeCache::Dump(std::ostream& os) { + MutexLock mu(Thread::Current(), lock_); + os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n" + << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n" + << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n" + << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n" + << "Total number of JIT compilations: " << number_of_compilations_ << "\n" + << "Total number of JIT compilations for on stack replacement: " + << number_of_osr_compilations_ << "\n" + << "Total number of deoptimizations: " << number_of_deoptimizations_ << "\n" + << "Total number of JIT code cache collections: " << number_of_collections_ << std::endl; +} + } // namespace jit } // namespace art diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h index 4574edfb46..98dd70dcf9 100644 --- a/runtime/jit/jit_code_cache.h +++ b/runtime/jit/jit_code_cache.h @@ -67,22 +67,26 @@ class JitCodeCache { // Number of bytes allocated in the data cache. size_t DataCacheSize() REQUIRES(!lock_); - // Number of compiled code in the code cache. Note that this is not the number - // of methods that got JIT compiled, as we might have collected some. 
- size_t NumberOfCompiledCode() REQUIRES(!lock_); - - // Number of compilations done throughout the lifetime of the JIT. - size_t NumberOfCompilations() REQUIRES(!lock_); - size_t NumberOfOsrCompilations() REQUIRES(!lock_); - bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); + // Notify to the code cache that the compiler wants to use the + // profiling info of `method` to drive optimizations, + // and therefore ensure the returned profiling info object is not + // collected. + ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!lock_); + void DoneCompiling(ArtMethod* method, Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_); + void DoneCompilerUse(ArtMethod* method, Thread* self) + SHARED_REQUIRES(Locks::mutator_lock_) + REQUIRES(!lock_); + // Allocate and write code and its metadata to the code cache. uint8_t* CommitCode(Thread* self, ArtMethod* method, @@ -124,6 +128,11 @@ class JitCodeCache { return live_bitmap_.get(); } + // Return whether we should do a full collection given the current state of the cache. + bool ShouldDoFullCollection() + REQUIRES(lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + // Perform a collection on the code cache. void GarbageCollectCache(Thread* self) REQUIRES(!lock_) @@ -146,6 +155,8 @@ class JitCodeCache { REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); + void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_); + // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true, // will collect and retry if the first allocation is unsuccessful. ProfilingInfo* AddProfilingInfo(Thread* self, @@ -180,6 +191,8 @@ class JitCodeCache { REQUIRES(!lock_) SHARED_REQUIRES(Locks::mutator_lock_); + void Dump(std::ostream& os) REQUIRES(!lock_); + private: // Take ownership of maps. 
JitCodeCache(MemMap* code_map, @@ -208,7 +221,7 @@ class JitCodeCache { ProfilingInfo* AddProfilingInfoInternal(Thread* self, ArtMethod* method, const std::vector<uint32_t>& entries) - REQUIRES(!lock_) + REQUIRES(lock_) SHARED_REQUIRES(Locks::mutator_lock_); // If a collection is in progress, wait for it to finish. Return @@ -235,11 +248,11 @@ class JitCodeCache { // Set the footprint limit of the code cache. void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_); - void DoFullCollection(Thread* self) + void DoCollection(Thread* self, bool collect_profiling_info) REQUIRES(!lock_) SHARED_REQUIRES(Locks::mutator_lock_); - void RemoveUnusedCode(Thread* self) + void RemoveUnmarkedCode(Thread* self) REQUIRES(!lock_) SHARED_REQUIRES(Locks::mutator_lock_); @@ -247,6 +260,15 @@ class JitCodeCache { REQUIRES(!lock_) SHARED_REQUIRES(Locks::mutator_lock_); + bool CheckLiveCompiledCodeHasProfilingInfo() + REQUIRES(lock_) + SHARED_REQUIRES(Locks::mutator_lock_); + + void FreeCode(uint8_t* code) REQUIRES(lock_); + uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_); + void FreeData(uint8_t* data) REQUIRES(lock_); + uint8_t* AllocateData(size_t data_size) REQUIRES(lock_); + // Lock for guarding allocations, collections, and the method_code_map_. Mutex lock_; // Condition to wait on during collection. @@ -282,8 +304,8 @@ class JitCodeCache { // The current footprint in bytes of the data portion of the code cache. size_t data_end_ GUARDED_BY(lock_); - // Whether a full collection has already been done on the current capacity. - bool has_done_full_collection_ GUARDED_BY(lock_); + // Whether the last collection round increased the code cache. + bool last_collection_increased_code_cache_ GUARDED_BY(lock_); // Last time the the code_cache was updated. // It is atomic to avoid locking when reading it. @@ -298,19 +320,21 @@ class JitCodeCache { // The size in bytes of used memory for the code portion of the code cache. 
size_t used_memory_for_code_ GUARDED_BY(lock_); - void FreeCode(uint8_t* code) REQUIRES(lock_); - uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_); - void FreeData(uint8_t* data) REQUIRES(lock_); - uint8_t* AllocateData(size_t data_size) REQUIRES(lock_); - // Number of compilations done throughout the lifetime of the JIT. size_t number_of_compilations_ GUARDED_BY(lock_); + + // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT. size_t number_of_osr_compilations_ GUARDED_BY(lock_); + // Number of deoptimizations done throughout the lifetime of the JIT. + size_t number_of_deoptimizations_ GUARDED_BY(lock_); + + // Number of code cache collections done throughout the lifetime of the JIT. + size_t number_of_collections_ GUARDED_BY(lock_); + DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache); }; - } // namespace jit } // namespace art diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc index 46c362ac62..d751e5aae9 100644 --- a/runtime/jit/jit_instrumentation.cc +++ b/runtime/jit/jit_instrumentation.cc @@ -187,7 +187,18 @@ void JitInstrumentationListener::MethodEntered(Thread* thread, return; } - instrumentation_cache_->AddSamples(thread, method, 1); + ProfilingInfo* profiling_info = method->GetProfilingInfo(sizeof(void*)); + // Update the entrypoint if the ProfilingInfo has one. The interpreter will call it + // instead of interpreting the method. + // We avoid doing this if exit stubs are installed to not mess with the instrumentation. + // TODO(ngeoffray): Clean up instrumentation and code cache interactions. 
+ if ((profiling_info != nullptr) && + (profiling_info->GetSavedEntryPoint() != nullptr) && + !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) { + method->SetEntryPointFromQuickCompiledCode(profiling_info->GetSavedEntryPoint()); + } else { + instrumentation_cache_->AddSamples(thread, method, 1); + } } void JitInstrumentationListener::Branch(Thread* thread, diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc index 67c9b5f679..ecf34f57ef 100644 --- a/runtime/jit/offline_profiling_info.cc +++ b/runtime/jit/offline_profiling_info.cc @@ -26,6 +26,7 @@ #include "base/mutex.h" #include "base/scoped_flock.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/unix_file/fd_file.h" #include "jit/profiling_info.h" #include "os.h" @@ -57,6 +58,7 @@ bool ProfileCompilationInfo::SaveProfilingInfo( return true; } + ScopedTrace trace(__PRETTY_FUNCTION__); ScopedFlock flock; std::string error; if (!flock.Init(filename.c_str(), O_RDWR | O_NOFOLLOW | O_CLOEXEC, /* block */ false, &error)) { @@ -132,6 +134,7 @@ static constexpr const char* kClassesMarker = "classes"; * app.apk:classes5.dex,218490184,39,13,49,1 **/ bool ProfileCompilationInfo::Save(int fd) { + ScopedTrace trace(__PRETTY_FUNCTION__); DCHECK_GE(fd, 0); // TODO(calin): Profile this and see how much memory it takes. If too much, // write to file directly. 
@@ -298,6 +301,7 @@ static int GetLineFromBuffer(char* buffer, int n, int start_from, std::string& l } bool ProfileCompilationInfo::Load(int fd) { + ScopedTrace trace(__PRETTY_FUNCTION__); DCHECK_GE(fd, 0); std::string current_line; diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc index 7f014fb660..6fe17dbe15 100644 --- a/runtime/jit/profile_saver.cc +++ b/runtime/jit/profile_saver.cc @@ -21,6 +21,7 @@ #include <fcntl.h> #include "art_method-inl.h" +#include "base/systrace.h" #include "scoped_thread_state_change.h" #include "oat_file_manager.h" @@ -114,6 +115,7 @@ void ProfileSaver::Run() { } bool ProfileSaver::ProcessProfilingInfo() { + ScopedTrace trace(__PRETTY_FUNCTION__); uint64_t last_update_time_ns = jit_code_cache_->GetLastUpdateTimeNs(); if (!first_profile_ && last_update_time_ns - code_cache_last_update_time_ns_ < kMinimumTimeBetweenCodeCacheUpdatesNs) { diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc index 3820592c4c..07c8051214 100644 --- a/runtime/jit/profiling_info.cc +++ b/runtime/jit/profiling_info.cc @@ -97,8 +97,8 @@ void ProfilingInfo::AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls) { } } } - // Unsuccessfull - cache is full, making it megamorphic. - DCHECK(cache->IsMegamorphic()); + // Unsuccessfull - cache is full, making it megamorphic. We do not DCHECK it though, + // as the garbage collector might clear the entries concurrently. } } // namespace art diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h index ab7237376b..55d627ab48 100644 --- a/runtime/jit/profiling_info.h +++ b/runtime/jit/profiling_info.h @@ -56,10 +56,11 @@ class InlineCache { mirror::Class* GetMonomorphicType() const SHARED_REQUIRES(Locks::mutator_lock_) { // Note that we cannot ensure the inline cache is actually monomorphic // at this point, as other threads may have updated it. 
+ DCHECK(!classes_[0].IsNull()); return classes_[0].Read(); } - bool IsUnitialized() const { + bool IsUninitialized() const { return classes_[0].IsNull(); } @@ -126,11 +127,44 @@ class ProfilingInfo { is_method_being_compiled_ = value; } + void SetSavedEntryPoint(const void* entry_point) { + saved_entry_point_ = entry_point; + } + + const void* GetSavedEntryPoint() const { + return saved_entry_point_; + } + + void ClearGcRootsInInlineCaches() { + for (size_t i = 0; i < number_of_inline_caches_; ++i) { + InlineCache* cache = &cache_[i]; + memset(&cache->classes_[0], + 0, + InlineCache::kIndividualCacheSize * sizeof(GcRoot<mirror::Class>)); + } + } + + void IncrementInlineUse() { + DCHECK_NE(current_inline_uses_, std::numeric_limits<uint16_t>::max()); + current_inline_uses_++; + } + + void DecrementInlineUse() { + DCHECK_GT(current_inline_uses_, 0); + current_inline_uses_--; + } + + bool IsInUseByCompiler() const { + return IsMethodBeingCompiled() || (current_inline_uses_ > 0); + } + private: ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries) : number_of_inline_caches_(entries.size()), method_(method), - is_method_being_compiled_(false) { + is_method_being_compiled_(false), + current_inline_uses_(0), + saved_entry_point_(nullptr) { memset(&cache_, 0, number_of_inline_caches_ * sizeof(InlineCache)); for (size_t i = 0; i < number_of_inline_caches_; ++i) { cache_[i].dex_pc_ = entries[i]; @@ -148,6 +182,14 @@ class ProfilingInfo { // TODO: Make the JIT code cache lock global. bool is_method_being_compiled_; + // When the compiler inlines the method associated to this ProfilingInfo, + // it updates this counter so that the GC does not try to clear the inline caches. + uint16_t current_inline_uses_; + + // Entry point of the corresponding ArtMethod, while the JIT code cache + // is poking for the liveness of compiled code. + const void* saved_entry_point_; + // Dynamically allocated array of size `number_of_inline_caches_`. 
InlineCache cache_[0]; diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index 11156c6229..421641ce39 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -590,7 +590,19 @@ void MemMap::MadviseDontNeedAndZero() { } bool MemMap::Sync() { - return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0; + bool result; + if (redzone_size_ != 0) { + // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing + // it to msync() as it only accepts page-aligned base address, and exclude the higher-end + // noaccess protection from the msync range. b/27552451. + uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_); + MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin); + result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0; + MEMORY_TOOL_MAKE_NOACCESS(base_begin, begin_ - base_begin); + } else { + result = msync(BaseBegin(), BaseSize(), MS_SYNC) == 0; + } + return result; } bool MemMap::Protect(int prot) { diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index 4d941302f9..701c600822 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -183,7 +183,7 @@ int32_t Object::IdentityHashCode() const { break; } case LockWord::kFatLocked: { - // Already inflated, return the has stored in the monitor. + // Already inflated, return the hash stored in the monitor. 
Monitor* monitor = lw.FatLockMonitor(); DCHECK(monitor != nullptr); return monitor->GetHashCode(); diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 19c71f6d97..a262c7a8f3 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -16,14 +16,12 @@ #include "monitor.h" -#define ATRACE_TAG ATRACE_TAG_DALVIK - -#include <cutils/trace.h> #include <vector> #include "art_method-inl.h" #include "base/mutex.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "class_linker.h" #include "dex_file-inl.h" @@ -499,6 +497,24 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, self->SetWaitMonitor(nullptr); } + // Allocate the interrupted exception not holding the monitor lock since it may cause a GC. + // If the GC requires acquiring the monitor for enqueuing cleared references, this would + // cause a deadlock if the monitor is held. + if (was_interrupted && interruptShouldThrow) { + /* + * We were interrupted while waiting, or somebody interrupted an + * un-interruptible thread earlier and we're bailing out immediately. + * + * The doc sayeth: "The interrupted status of the current thread is + * cleared when this exception is thrown." + */ + { + MutexLock mu(self, *self->GetWaitMutex()); + self->SetInterruptedLocked(false); + } + self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr); + } + // Re-acquire the monitor and lock. Lock(self); monitor_lock_.Lock(self); @@ -518,21 +534,6 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, RemoveFromWaitSet(self); monitor_lock_.Unlock(self); - - if (was_interrupted && interruptShouldThrow) { - /* - * We were interrupted while waiting, or somebody interrupted an - * un-interruptible thread earlier and we're bailing out immediately. - * - * The doc sayeth: "The interrupted status of the current thread is - * cleared when this exception is thrown." 
- */ - { - MutexLock mu(self, *self->GetWaitMutex()); - self->SetInterruptedLocked(false); - } - self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr); - } } void Monitor::Notify(Thread* self) { diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc index 2832e32dd1..ce38e4f108 100644 --- a/runtime/monitor_pool.cc +++ b/runtime/monitor_pool.cc @@ -42,16 +42,17 @@ void MonitorPool::AllocateChunk() { if (capacity_ == 0U) { // Initialization. capacity_ = kInitialChunkStorage; - uintptr_t* new_backing = new uintptr_t[capacity_]; + uintptr_t* new_backing = new uintptr_t[capacity_](); + DCHECK(monitor_chunks_.LoadRelaxed() == nullptr); monitor_chunks_.StoreRelaxed(new_backing); } else { size_t new_capacity = 2 * capacity_; - uintptr_t* new_backing = new uintptr_t[new_capacity]; + uintptr_t* new_backing = new uintptr_t[new_capacity](); uintptr_t* old_backing = monitor_chunks_.LoadRelaxed(); memcpy(new_backing, old_backing, sizeof(uintptr_t) * capacity_); monitor_chunks_.StoreRelaxed(new_backing); capacity_ = new_capacity; - old_chunk_arrays_.push_back(old_backing); + old_chunk_arrays_.push_back(std::unique_ptr<uintptr_t[]>(old_backing)); VLOG(monitor) << "Resizing to capacity " << capacity_; } } @@ -88,6 +89,25 @@ void MonitorPool::AllocateChunk() { first_free_ = last; } +void MonitorPool::FreeInternal() { + // This is on shutdown with NO_THREAD_SAFETY_ANALYSIS, can't/don't need to lock. 
+ uintptr_t* backing = monitor_chunks_.LoadRelaxed(); + DCHECK(backing != nullptr); + DCHECK_GT(capacity_, 0U); + DCHECK_GT(num_chunks_, 0U); + + for (size_t i = 0; i < capacity_; ++i) { + if (i < num_chunks_) { + DCHECK_NE(backing[i], 0U); + allocator_.deallocate(reinterpret_cast<uint8_t*>(backing[i]), kChunkSize); + } else { + DCHECK_EQ(backing[i], 0U); + } + } + + delete[] backing; +} + Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) SHARED_REQUIRES(Locks::mutator_lock_) { diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h index 240ca61641..875b3fe73d 100644 --- a/runtime/monitor_pool.h +++ b/runtime/monitor_pool.h @@ -104,6 +104,12 @@ class MonitorPool { #endif } + ~MonitorPool() { +#ifdef __LP64__ + FreeInternal(); +#endif + } + private: #ifdef __LP64__ // When we create a monitor pool, threads have not been initialized, yet, so ignore thread-safety @@ -112,6 +118,10 @@ class MonitorPool { void AllocateChunk() REQUIRES(Locks::allocated_monitor_ids_lock_); + // Release all chunks and metadata. This is done on shutdown, where threads have been destroyed, + // so ignore thead-safety analysis. + void FreeInternal() NO_THREAD_SAFETY_ANALYSIS; + Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) SHARED_REQUIRES(Locks::mutator_lock_); @@ -176,7 +186,8 @@ class MonitorPool { size_t capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); // To avoid race issues when resizing, we keep all the previous arrays. 
- std::vector<uintptr_t*> old_chunk_arrays_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); + std::vector<std::unique_ptr<uintptr_t[]>> old_chunk_arrays_ + GUARDED_BY(Locks::allocated_monitor_ids_lock_); typedef TrackingAllocator<uint8_t, kAllocatorTagMonitorPool> Allocator; Allocator allocator_; diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc index c177f19b56..df794e1249 100644 --- a/runtime/native/java_lang_Runtime.cc +++ b/runtime/native/java_lang_Runtime.cc @@ -67,9 +67,11 @@ static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPath) { #endif } -static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader, - jboolean isSharedNamespace, jstring javaLibrarySearchPath, - jstring javaLibraryPermittedPath) { +static jstring Runtime_nativeLoad(JNIEnv* env, + jclass, + jstring javaFilename, + jobject javaLoader, + jstring javaLibrarySearchPath) { ScopedUtfChars filename(env, javaFilename); if (filename.c_str() == nullptr) { return nullptr; @@ -79,7 +81,9 @@ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job // Starting with N nativeLoad uses classloader local // linker namespace instead of global LD_LIBRARY_PATH - // (23 is Marshmallow) + // (23 is Marshmallow). 
This call is here to preserve + // backwards compatibility for the apps targeting sdk + // version <= 23 if (target_sdk_version == 0) { SetLdLibraryPath(env, javaLibrarySearchPath); } @@ -90,9 +94,7 @@ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job bool success = vm->LoadNativeLibrary(env, filename.c_str(), javaLoader, - isSharedNamespace == JNI_TRUE, javaLibrarySearchPath, - javaLibraryPermittedPath, &error_msg); if (success) { return nullptr; @@ -121,7 +123,7 @@ static JNINativeMethod gMethods[] = { NATIVE_METHOD(Runtime, gc, "()V"), NATIVE_METHOD(Runtime, maxMemory, "!()J"), NATIVE_METHOD(Runtime, nativeExit, "(I)V"), - NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;ZLjava/lang/String;Ljava/lang/String;)Ljava/lang/String;"), + NATIVE_METHOD(Runtime, nativeLoad, "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)Ljava/lang/String;"), NATIVE_METHOD(Runtime, totalMemory, "!()J"), }; diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc index 6ffd476edf..858849f980 100644 --- a/runtime/native/sun_misc_Unsafe.cc +++ b/runtime/native/sun_misc_Unsafe.cc @@ -26,6 +26,7 @@ #include <unistd.h> #include <stdlib.h> #include <string.h> +#include <atomic> namespace art { @@ -473,6 +474,18 @@ static void Unsafe_putDouble(JNIEnv* env, jobject, jobject javaObj, jlong offset obj->SetField64<false>(MemberOffset(offset), conv.converted); } +static void Unsafe_loadFence(JNIEnv*, jobject) { + std::atomic_thread_fence(std::memory_order_acquire); +} + +static void Unsafe_storeFence(JNIEnv*, jobject) { + std::atomic_thread_fence(std::memory_order_release); +} + +static void Unsafe_fullFence(JNIEnv*, jobject) { + std::atomic_thread_fence(std::memory_order_seq_cst); +} + static JNINativeMethod gMethods[] = { NATIVE_METHOD(Unsafe, compareAndSwapInt, "!(Ljava/lang/Object;JII)Z"), NATIVE_METHOD(Unsafe, compareAndSwapLong, "!(Ljava/lang/Object;JJJ)Z"), @@ -532,6 +545,11 @@ static 
JNINativeMethod gMethods[] = { OVERLOADED_NATIVE_METHOD(Unsafe, putLong, "!(JJ)V", putLongJJ), OVERLOADED_NATIVE_METHOD(Unsafe, putFloat, "!(JF)V", putFloatJF), OVERLOADED_NATIVE_METHOD(Unsafe, putDouble, "!(JD)V", putDoubleJD), + + // CAS + NATIVE_METHOD(Unsafe, loadFence, "!()V"), + NATIVE_METHOD(Unsafe, storeFence, "!()V"), + NATIVE_METHOD(Unsafe, fullFence, "!()V"), }; void register_sun_misc_Unsafe(JNIEnv* env) { diff --git a/runtime/oat.cc b/runtime/oat.cc index 2ac105291d..ed99cbabbd 100644 --- a/runtime/oat.cc +++ b/runtime/oat.cc @@ -468,6 +468,10 @@ bool OatHeader::IsDebuggable() const { return IsKeyEnabled(OatHeader::kDebuggableKey); } +bool OatHeader::IsNativeDebuggable() const { + return IsKeyEnabled(OatHeader::kNativeDebuggableKey); +} + bool OatHeader::IsExtractOnly() const { return KeyHasValue(kCompilationType, kExtractOnlyValue, diff --git a/runtime/oat.h b/runtime/oat.h index 0660e19ff4..1d6c076c1b 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -38,6 +38,7 @@ class PACKED(4) OatHeader { static constexpr const char* kDex2OatHostKey = "dex2oat-host"; static constexpr const char* kPicKey = "pic"; static constexpr const char* kDebuggableKey = "debuggable"; + static constexpr const char* kNativeDebuggableKey = "native-debuggable"; static constexpr const char* kCompilationType = "compilation-type"; static constexpr const char* kClassPathKey = "classpath"; static constexpr const char* kBootClassPath = "bootclasspath"; @@ -110,6 +111,7 @@ class PACKED(4) OatHeader { size_t GetHeaderSize() const; bool IsPic() const; bool IsDebuggable() const; + bool IsNativeDebuggable() const; bool IsExtractOnly() const; bool IsProfileGuideCompiled() const; diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index 49fbf7261e..7155c79afb 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -35,6 +35,7 @@ #include "art_method-inl.h" #include "base/bit_vector.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/unix_file/fd_file.h" #include 
"elf_file.h" #include "elf_utils.h" @@ -168,7 +169,10 @@ bool OatFileBase::ComputeFields(uint8_t* requested_base, return false; } if (requested_base != nullptr && begin_ != requested_base) { - PrintFileToLog("/proc/self/maps", LogSeverity::WARNING); + // Host can fail this check. Do not dump there to avoid polluting the output. + if (kIsTargetBuild) { + PrintFileToLog("/proc/self/maps", LogSeverity::WARNING); + } *error_msg = StringPrintf("Failed to find oatdata symbol at expected address: " "oatdata=%p != expected=%p. See process maps in the log.", begin_, requested_base); @@ -746,6 +750,7 @@ ElfOatFile* ElfOatFile::OpenElfFile(File* file, bool executable, const char* abs_dex_location, std::string* error_msg) { + ScopedTrace trace("Open elf file " + location); std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, executable)); bool success = oat_file->ElfFileOpen(file, oat_file_begin, writable, executable, error_msg); if (!success) { @@ -768,6 +773,7 @@ ElfOatFile* ElfOatFile::OpenElfFile(File* file, bool ElfOatFile::InitializeFromElfFile(ElfFile* elf_file, const char* abs_dex_location, std::string* error_msg) { + ScopedTrace trace(__PRETTY_FUNCTION__); if (IsExecutable()) { *error_msg = "Cannot initialize from elf file in executable mode."; return false; @@ -787,6 +793,7 @@ bool ElfOatFile::Load(const std::string& elf_filename, bool writable, bool executable, std::string* error_msg) { + ScopedTrace trace(__PRETTY_FUNCTION__); std::unique_ptr<File> file(OS::OpenFileForReading(elf_filename.c_str())); if (file == nullptr) { *error_msg = StringPrintf("Failed to open oat filename for reading: %s", strerror(errno)); @@ -804,6 +811,7 @@ bool ElfOatFile::ElfFileOpen(File* file, bool writable, bool executable, std::string* error_msg) { + ScopedTrace trace(__PRETTY_FUNCTION__); // TODO: rename requested_base to oat_data_begin elf_file_.reset(ElfFile::Open(file, writable, @@ -864,6 +872,7 @@ OatFile* OatFile::Open(const std::string& filename, bool executable, const 
char* abs_dex_location, std::string* error_msg) { + ScopedTrace trace("Open oat file " + location); CHECK(!filename.empty()) << location; CheckLocation(location); std::unique_ptr<OatFile> ret; @@ -1072,6 +1081,7 @@ size_t OatFile::OatDexFile::FileSize() const { } std::unique_ptr<const DexFile> OatFile::OatDexFile::OpenDexFile(std::string* error_msg) const { + ScopedTrace trace(__PRETTY_FUNCTION__); return DexFile::Open(dex_file_pointer_, FileSize(), dex_file_location_, diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc index 0912ba06c1..e57125bef1 100644 --- a/runtime/oat_file_manager.cc +++ b/runtime/oat_file_manager.cc @@ -16,14 +16,13 @@ #include "oat_file_manager.h" -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include <cutils/trace.h> #include <memory> #include <queue> #include <vector> #include "base/logging.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "class_linker.h" #include "dex_file-inl.h" #include "gc/scoped_gc_critical_section.h" @@ -299,6 +298,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat( jobjectArray dex_elements, const OatFile** out_oat_file, std::vector<std::string>* error_msgs) { + ScopedTrace trace(__FUNCTION__); CHECK(dex_location != nullptr); CHECK(error_msgs != nullptr); @@ -393,14 +393,15 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat( ScopedSuspendAll ssa("Add image space"); runtime->GetHeap()->AddSpace(image_space.get()); } - ATRACE_BEGIN(StringPrintf("Adding image space for location %s", dex_location).c_str()); - added_image_space = runtime->GetClassLinker()->AddImageSpace(image_space.get(), - h_loader, - dex_elements, - dex_location, - /*out*/&dex_files, - /*out*/&temp_error_msg); - ATRACE_END(); + { + ScopedTrace trace2(StringPrintf("Adding image space for location %s", dex_location)); + added_image_space = runtime->GetClassLinker()->AddImageSpace(image_space.get(), + h_loader, + dex_elements, + dex_location, + 
/*out*/&dex_files, + /*out*/&temp_error_msg); + } if (added_image_space) { // Successfully added image space to heap, release the map so that it does not get // freed. diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h index 2b7eca2859..daabc6ee09 100644 --- a/runtime/oat_quick_method_header.h +++ b/runtime/oat_quick_method_header.h @@ -63,16 +63,24 @@ class PACKED(4) OatQuickMethodHeader { return gc_map_offset_ == 0 && vmap_table_offset_ != 0; } - CodeInfo GetOptimizedCodeInfo() const { + const void* GetOptimizedCodeInfoPtr() const { DCHECK(IsOptimized()); const void* data = reinterpret_cast<const void*>(code_ - vmap_table_offset_); - return CodeInfo(data); + return data; + } + + CodeInfo GetOptimizedCodeInfo() const { + return CodeInfo(GetOptimizedCodeInfoPtr()); } const uint8_t* GetCode() const { return code_; } + uint32_t GetCodeSize() const { + return code_size_; + } + const uint8_t* GetNativeGcMap() const { return (gc_map_offset_ == 0) ? nullptr : code_ - gc_map_offset_; } @@ -111,7 +119,7 @@ class PACKED(4) OatQuickMethodHeader { uint32_t GetFrameSizeInBytes() const { uint32_t result = frame_info_.FrameSizeInBytes(); if (kCheckFrameSize) { - DCHECK_LE(static_cast<size_t>(kStackAlignment), result); + DCHECK_ALIGNED(result, kStackAlignment); } return result; } diff --git a/runtime/openjdkjvm/OpenjdkJvm.cc b/runtime/openjdkjvm/OpenjdkJvm.cc index 725067a351..d377457eb2 100644 --- a/runtime/openjdkjvm/OpenjdkJvm.cc +++ b/runtime/openjdkjvm/OpenjdkJvm.cc @@ -329,9 +329,10 @@ static void SetLdLibraryPath(JNIEnv* env, jstring javaLdLibraryPath) { } -JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env, jstring javaFilename, jobject javaLoader, - jboolean isSharedNamespace, jstring javaLibrarySearchPath, - jstring javaLibraryPermittedPath) { +JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env, + jstring javaFilename, + jobject javaLoader, + jstring javaLibrarySearchPath) { ScopedUtfChars filename(env, javaFilename); if (filename.c_str() == NULL) { 
return NULL; @@ -354,9 +355,7 @@ JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env, jstring javaFilename, jobject java bool success = vm->LoadNativeLibrary(env, filename.c_str(), javaLoader, - isSharedNamespace == JNI_TRUE, javaLibrarySearchPath, - javaLibraryPermittedPath, &error_msg); if (success) { return nullptr; diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index d64aa432fc..60403f9752 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -166,6 +166,9 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize .Define("-Xjitwarmupthreshold:_") .WithType<unsigned int>() .IntoKey(M::JITWarmupThreshold) + .Define("-Xjitosrthreshold:_") + .WithType<unsigned int>() + .IntoKey(M::JITOsrThreshold) .Define("-Xjitsaveprofilinginfo") .WithValue(true) .IntoKey(M::JITSaveProfilingInfo) @@ -694,6 +697,8 @@ void ParsedOptions::Usage(const char* fmt, ...) { UsageMessage(stream, " -Xusejit:booleanvalue\n"); UsageMessage(stream, " -Xjitinitialsize:N\n"); UsageMessage(stream, " -Xjitmaxsize:N\n"); + UsageMessage(stream, " -Xjitwarmupthreshold:integervalue\n"); + UsageMessage(stream, " -Xjitosrthreshold:integervalue\n"); UsageMessage(stream, " -X[no]relocate\n"); UsageMessage(stream, " -X[no]dex2oat (Whether to invoke dex2oat on the application)\n"); UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n"); diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc index 9b10f2e0b8..c7ccee2125 100644 --- a/runtime/quick/inline_method_analyser.cc +++ b/runtime/quick/inline_method_analyser.cc @@ -744,9 +744,12 @@ bool InlineMethodAnalyser::ComputeSpecialAccessorInfo(ArtMethod* method, return false; } DCHECK_GE(field->GetOffset().Int32Value(), 0); + // Do not interleave function calls with bit field writes to placate valgrind. Bug: 27552451. 
+ uint32_t field_offset = field->GetOffset().Uint32Value(); + bool is_volatile = field->IsVolatile(); result->field_idx = field_idx; - result->field_offset = field->GetOffset().Int32Value(); - result->is_volatile = field->IsVolatile(); + result->field_offset = field_offset; + result->is_volatile = is_volatile ? 1u : 0u; return true; } diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h index 7e84b405e7..0e12d73595 100644 --- a/runtime/quick/inline_method_analyser.h +++ b/runtime/quick/inline_method_analyser.h @@ -101,6 +101,17 @@ enum InlineMethodOpcode : uint16_t { kIntrinsicCas, kIntrinsicUnsafeGet, kIntrinsicUnsafePut, + + // 1.8. + kIntrinsicUnsafeGetAndAddInt, + kIntrinsicUnsafeGetAndAddLong, + kIntrinsicUnsafeGetAndSetInt, + kIntrinsicUnsafeGetAndSetLong, + kIntrinsicUnsafeGetAndSetObject, + kIntrinsicUnsafeLoadFence, + kIntrinsicUnsafeStoreFence, + kIntrinsicUnsafeFullFence, + kIntrinsicSystemArrayCopyCharArray, kIntrinsicSystemArrayCopy, diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 4b45b42ea0..fd6cc100eb 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -23,8 +23,6 @@ #include <sys/prctl.h> #endif -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include <cutils/trace.h> #include <signal.h> #include <sys/syscall.h> #include "base/memory_tool.h" @@ -58,6 +56,7 @@ #include "base/arena_allocator.h" #include "base/dumpable.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/unix_file/fd_file.h" #include "class_linker-inl.h" #include "compiler_callbacks.h" @@ -216,7 +215,7 @@ Runtime::Runtime() } Runtime::~Runtime() { - ATRACE_BEGIN("Runtime shutdown"); + ScopedTrace trace("Runtime shutdown"); if (is_native_bridge_loaded_) { UnloadNativeBridge(); } @@ -231,40 +230,34 @@ Runtime::~Runtime() { Thread* self = Thread::Current(); const bool attach_shutdown_thread = self == nullptr; if (attach_shutdown_thread) { - ATRACE_BEGIN("Attach shutdown thread"); CHECK(AttachCurrentThread("Shutdown 
thread", false, nullptr, false)); - ATRACE_END(); self = Thread::Current(); } else { LOG(WARNING) << "Current thread not detached in Runtime shutdown"; } { - ATRACE_BEGIN("Wait for shutdown cond"); + ScopedTrace trace2("Wait for shutdown cond"); MutexLock mu(self, *Locks::runtime_shutdown_lock_); shutting_down_started_ = true; while (threads_being_born_ > 0) { shutdown_cond_->Wait(self); } shutting_down_ = true; - ATRACE_END(); } // Shutdown and wait for the daemons. CHECK(self != nullptr); if (IsFinishedStarting()) { - ATRACE_BEGIN("Waiting for Daemons"); + ScopedTrace trace2("Waiting for Daemons"); self->ClearException(); self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, WellKnownClasses::java_lang_Daemons_stop); - ATRACE_END(); } Trace::Shutdown(); if (attach_shutdown_thread) { - ATRACE_BEGIN("Detach shutdown thread"); DetachCurrentThread(); - ATRACE_END(); self = nullptr; } @@ -272,14 +265,13 @@ Runtime::~Runtime() { heap_->WaitForGcToComplete(gc::kGcCauseBackground, self); heap_->DeleteThreadPool(); if (jit_ != nullptr) { - ATRACE_BEGIN("Delete jit"); + ScopedTrace trace2("Delete jit"); VLOG(jit) << "Deleting jit thread pool"; // Delete thread pool before the thread list since we don't want to wait forever on the // JIT compiler threads. jit_->DeleteThreadPool(); // Similarly, stop the profile saver thread before deleting the thread list. jit_->StopProfileSaver(); - ATRACE_END(); } // Make sure our internal threads are dead before we start tearing down things they're using. @@ -287,10 +279,10 @@ Runtime::~Runtime() { delete signal_catcher_; // Make sure all other non-daemon threads have terminated, and all daemon threads are suspended. - ATRACE_BEGIN("Delete thread list"); - delete thread_list_; - ATRACE_END(); - + { + ScopedTrace trace2("Delete thread list"); + delete thread_list_; + } // Delete the JIT after thread list to ensure that there is no remaining threads which could be // accessing the instrumentation when we delete it. 
if (jit_ != nullptr) { @@ -301,7 +293,7 @@ Runtime::~Runtime() { // Shutdown the fault manager if it was initialized. fault_manager.Shutdown(); - ATRACE_BEGIN("Delete state"); + ScopedTrace trace2("Delete state"); delete monitor_list_; delete monitor_pool_; delete class_linker_; @@ -319,12 +311,10 @@ Runtime::~Runtime() { arena_pool_.reset(); jit_arena_pool_.reset(); MemMap::Shutdown(); - ATRACE_END(); // TODO: acquire a static mutex on Runtime to avoid racing. CHECK(instance_ == nullptr || instance_ == this); instance_ = nullptr; - ATRACE_END(); } struct AbortState { @@ -563,12 +553,14 @@ bool Runtime::Start() { // Use !IsAotCompiler so that we get test coverage, tests are never the zygote. if (!IsAotCompiler()) { ScopedObjectAccess soa(self); - ATRACE_BEGIN("AddImageStringsToTable"); - GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces()); - ATRACE_END(); - ATRACE_BEGIN("MoveImageClassesToClassTable"); - GetClassLinker()->AddBootImageClassesToClassTable(); - ATRACE_END(); + { + ScopedTrace trace2("AddImageStringsToTable"); + GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces()); + } + { + ScopedTrace trace2("MoveImageClassesToClassTable"); + GetClassLinker()->AddBootImageClassesToClassTable(); + } } // If we are the zygote then we need to wait until after forking to create the code cache @@ -586,9 +578,10 @@ bool Runtime::Start() { // InitNativeMethods needs to be after started_ so that the classes // it touches will have methods linked to the oat file if necessary. - ATRACE_BEGIN("InitNativeMethods"); - InitNativeMethods(); - ATRACE_END(); + { + ScopedTrace trace2("InitNativeMethods"); + InitNativeMethods(); + } // Initialize well known thread group values that may be accessed threads while attaching. 
InitThreadGroups(self); @@ -614,9 +607,7 @@ bool Runtime::Start() { GetInstructionSetString(kRuntimeISA)); } - ATRACE_BEGIN("StartDaemonThreads"); StartDaemonThreads(); - ATRACE_END(); { ScopedObjectAccess soa(self); @@ -751,6 +742,7 @@ bool Runtime::IsDebuggable() const { } void Runtime::StartDaemonThreads() { + ScopedTrace trace(__FUNCTION__); VLOG(startup) << "Runtime::StartDaemonThreads entering"; Thread* self = Thread::Current(); @@ -894,7 +886,7 @@ void Runtime::SetSentinel(mirror::Object* sentinel) { bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { RuntimeArgumentMap runtime_options(std::move(runtime_options_in)); - ATRACE_BEGIN("Runtime::Init"); + ScopedTrace trace(__FUNCTION__); CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize); MemMap::Init(); @@ -961,7 +953,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode); XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption); - ATRACE_BEGIN("CreateHeap"); heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize), runtime_options.GetOrDefault(Opt::HeapGrowthLimit), runtime_options.GetOrDefault(Opt::HeapMinFree), @@ -992,11 +983,9 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { xgc_option.gcstress_, runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM), runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs)); - ATRACE_END(); if (!heap_->HasBootImageSpace() && !allow_dex_file_fallback_) { LOG(ERROR) << "Dex file fallback disabled, cannot continue without image."; - ATRACE_END(); return false; } @@ -1106,10 +1095,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U); class_linker_ = new ClassLinker(intern_table_); if (GetHeap()->HasBootImageSpace()) { - ATRACE_BEGIN("InitFromImage"); std::string error_msg; bool result = class_linker_->InitFromBootImage(&error_msg); - ATRACE_END(); if (!result) { 
LOG(ERROR) << "Could not initialize from image: " << error_msg; return false; @@ -1252,8 +1239,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { VLOG(startup) << "Runtime::Init exiting"; - ATRACE_END(); - return true; } @@ -1281,9 +1266,7 @@ void Runtime::InitNativeMethods() { // libcore can't because it's the library that implements System.loadLibrary! { std::string error_msg; - if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, - /* is_shared_namespace */ false, - nullptr, nullptr, &error_msg)) { + if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, nullptr, &error_msg)) { LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << error_msg; } } @@ -1292,9 +1275,7 @@ void Runtime::InitNativeMethods() { ? "libopenjdkd.so" : "libopenjdk.so"; std::string error_msg; - if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr, - /* is_shared_namespace */ false, - nullptr, nullptr, &error_msg)) { + if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr, nullptr, &error_msg)) { LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg; } } @@ -1473,10 +1454,12 @@ void Runtime::BlockSignals() { bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group, bool create_peer) { + ScopedTrace trace(__FUNCTION__); return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr; } void Runtime::DetachCurrentThread() { + ScopedTrace trace(__FUNCTION__); Thread* self = Thread::Current(); if (self == nullptr) { LOG(FATAL) << "attempting to detach thread that is not attached"; diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc index 8237b06a56..bc963c5b8c 100644 --- a/runtime/runtime_linux.cc +++ b/runtime/runtime_linux.cc @@ -36,6 +36,7 @@ namespace art { static constexpr bool kDumpHeapObjectOnSigsevg = false; static constexpr bool kUseSigRTTimeout = true; +static constexpr bool kDumpNativeStackOnTimeout = true; 
struct Backtrace { public: @@ -350,7 +351,9 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex if (runtime != nullptr) { if (IsTimeoutSignal(signal_number)) { // Special timeout signal. Try to dump all threads. - runtime->GetThreadList()->DumpForSigQuit(LOG(INTERNAL_FATAL)); + // Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts + // are of value here. + runtime->GetThreadList()->Dump(LOG(INTERNAL_FATAL), kDumpNativeStackOnTimeout); } gc::Heap* heap = runtime->GetHeap(); LOG(INTERNAL_FATAL) << "Fault message: " << runtime->GetFaultMessage(); diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def index 838d1a9649..3fd9905a60 100644 --- a/runtime/runtime_options.def +++ b/runtime/runtime_options.def @@ -69,7 +69,8 @@ RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true) RUNTIME_OPTIONS_KEY (bool, UseJIT, false) RUNTIME_OPTIONS_KEY (bool, DumpNativeStackOnSigQuit, true) RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold) -RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold, jit::Jit::kDefaultWarmupThreshold) +RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold) +RUNTIME_OPTIONS_KEY (unsigned int, JITOsrThreshold) RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity) RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity) RUNTIME_OPTIONS_KEY (bool, JITSaveProfilingInfo, false) diff --git a/runtime/simulator/Android.mk b/runtime/simulator/Android.mk index c154eb6346..5c71da6255 100644 --- a/runtime/simulator/Android.mk +++ b/runtime/simulator/Android.mk @@ -86,7 +86,7 @@ define build-libart-simulator LOCAL_NATIVE_COVERAGE := $(ART_COVERAGE) # For simulator_arm64. 
ifeq ($$(art_ndebug_or_debug),debug) - LOCAL_SHARED_LIBRARIES += libvixld + LOCAL_SHARED_LIBRARIES += libvixl else LOCAL_SHARED_LIBRARIES += libvixl endif diff --git a/runtime/stack.cc b/runtime/stack.cc index b1f1ed61b4..ee5da8e150 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -739,7 +739,7 @@ void StackVisitor::SanityCheckFrame() const { // Check class linker linear allocs. mirror::Class* klass = method->GetDeclaringClass(); LinearAlloc* const class_linear_alloc = (klass != nullptr) - ? ClassLinker::GetAllocatorForClassLoader(klass->GetClassLoader()) + ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader()) : linear_alloc; if (!class_linear_alloc->Contains(method)) { // Check image space. diff --git a/runtime/thread.cc b/runtime/thread.cc index 2ee160571e..6b8c0c2e4b 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -14,11 +14,8 @@ * limitations under the License. */ -#define ATRACE_TAG ATRACE_TAG_DALVIK - #include "thread.h" -#include <cutils/trace.h> #include <pthread.h> #include <signal.h> #include <sys/resource.h> @@ -39,6 +36,7 @@ #include "base/mutex.h" #include "base/timing_logger.h" #include "base/to_str.h" +#include "base/systrace.h" #include "class_linker-inl.h" #include "debugger.h" #include "dex_file-inl.h" @@ -1119,9 +1117,8 @@ void Thread::RunCheckpointFunction() { bool found_checkpoint = false; for (uint32_t i = 0; i < kMaxCheckpoints; ++i) { if (checkpoints[i] != nullptr) { - ATRACE_BEGIN("Checkpoint function"); + ScopedTrace trace("Run checkpoint function"); checkpoints[i]->Run(this); - ATRACE_END(); found_checkpoint = true; } } @@ -1187,14 +1184,13 @@ void Thread::SetFlipFunction(Closure* function) { } void Thread::FullSuspendCheck() { + ScopedTrace trace(__FUNCTION__); VLOG(threads) << this << " self-suspending"; - ATRACE_BEGIN("Full suspend check"); // Make thread appear suspended to other threads, release mutator_lock_. 
tls32_.suspended_at_suspend_check = true; // Transition to suspended and back to runnable, re-acquire share on mutator_lock_. ScopedThreadSuspension(this, kSuspended); tls32_.suspended_at_suspend_check = false; - ATRACE_END(); VLOG(threads) << this << " self-reviving"; } diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index 49d54fda00..afb11d33e9 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -16,10 +16,7 @@ #include "thread_list.h" -#define ATRACE_TAG ATRACE_TAG_DALVIK - #include <backtrace/BacktraceMap.h> -#include <cutils/trace.h> #include <dirent.h> #include <ScopedLocalRef.h> #include <ScopedUtfChars.h> @@ -30,6 +27,7 @@ #include "base/histogram-inl.h" #include "base/mutex-inl.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "base/timing_logger.h" #include "debugger.h" @@ -59,6 +57,10 @@ static constexpr useconds_t kThreadSuspendInitialSleepUs = 0; static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000; static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000; +// Whether we should try to dump the native stack of unattached threads. See commit ed8b723 for +// some history. +static constexpr bool kDumpUnattachedThreadNativeStack = true; + ThreadList::ThreadList() : suspend_all_count_(0), debug_suspend_all_count_(0), @@ -69,38 +71,29 @@ ThreadList::ThreadList() } ThreadList::~ThreadList() { - ATRACE_BEGIN(__FUNCTION__); + ScopedTrace trace(__PRETTY_FUNCTION__); // Detach the current thread if necessary. If we failed to start, there might not be any threads. // We need to detach the current thread here in case there's another thread waiting to join with // us. 
bool contains = false; + Thread* self = Thread::Current(); { - Thread* self = Thread::Current(); MutexLock mu(self, *Locks::thread_list_lock_); contains = Contains(self); } if (contains) { - ATRACE_BEGIN("DetachCurrentThread"); Runtime::Current()->DetachCurrentThread(); - ATRACE_END(); } - ATRACE_BEGIN("WaitForOtherNonDaemonThreadsToExit"); WaitForOtherNonDaemonThreadsToExit(); - ATRACE_END(); // Disable GC and wait for GC to complete in case there are still daemon threads doing // allocations. gc::Heap* const heap = Runtime::Current()->GetHeap(); heap->DisableGCForShutdown(); // In case a GC is in progress, wait for it to finish. - ATRACE_BEGIN("WaitForGcToComplete"); heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current()); - ATRACE_END(); // TODO: there's an unaddressed race here where a thread may attach during shutdown, see // Thread::Init. - ATRACE_BEGIN("SuspendAllDaemonThreadsForShutdown"); SuspendAllDaemonThreadsForShutdown(); - ATRACE_END(); - ATRACE_END(); } bool ThreadList::Contains(Thread* thread) { @@ -149,9 +142,7 @@ static void DumpUnattachedThread(std::ostream& os, pid_t tid) NO_THREAD_SAFETY_A // refactor DumpState to avoid skipping analysis. Thread::DumpState(os, nullptr, tid); DumpKernelStack(os, tid, " kernel: ", false); - // TODO: Reenable this when the native code in system_server can handle it. - // Currently "adb shell kill -3 `pid system_server`" will cause it to exit. - if (false) { + if (kDumpUnattachedThreadNativeStack) { DumpNativeStack(os, tid, nullptr, " native: "); } os << "\n"; @@ -182,9 +173,9 @@ void ThreadList::DumpUnattachedThreads(std::ostream& os) { closedir(d); } -// Dump checkpoint timeout in milliseconds. Larger amount on the host, as dumping will invoke -// addr2line when available. -static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 10000 : 20000; +// Dump checkpoint timeout in milliseconds. Larger amount on the target, since the device could be +// overloaded with ANR dumps. 
+static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000; // A closure used by Thread::Dump. class DumpCheckpoint FINAL : public Closure { @@ -475,42 +466,42 @@ void ThreadList::SuspendAll(const char* cause, bool long_suspend) { } else { VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting..."; } - ATRACE_BEGIN("Suspending mutator threads"); - const uint64_t start_time = NanoTime(); + { + ScopedTrace trace("Suspending mutator threads"); + const uint64_t start_time = NanoTime(); - SuspendAllInternal(self, self); - // All threads are known to have suspended (but a thread may still own the mutator lock) - // Make sure this thread grabs exclusive access to the mutator lock and its protected data. + SuspendAllInternal(self, self); + // All threads are known to have suspended (but a thread may still own the mutator lock) + // Make sure this thread grabs exclusive access to the mutator lock and its protected data. #if HAVE_TIMED_RWLOCK - while (true) { - if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) { - break; - } else if (!long_suspend_) { - // Reading long_suspend without the mutator lock is slightly racy, in some rare cases, this - // could result in a thread suspend timeout. - // Timeout if we wait more than kThreadSuspendTimeoutMs seconds. - UnsafeLogFatalForThreadSuspendAllTimeout(); + while (true) { + if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) { + break; + } else if (!long_suspend_) { + // Reading long_suspend without the mutator lock is slightly racy, in some rare cases, this + // could result in a thread suspend timeout. + // Timeout if we wait more than kThreadSuspendTimeoutMs seconds. 
+ UnsafeLogFatalForThreadSuspendAllTimeout(); + } } - } #else - Locks::mutator_lock_->ExclusiveLock(self); + Locks::mutator_lock_->ExclusiveLock(self); #endif - long_suspend_ = long_suspend; + long_suspend_ = long_suspend; - const uint64_t end_time = NanoTime(); - const uint64_t suspend_time = end_time - start_time; - suspend_all_historam_.AdjustAndAddValue(suspend_time); - if (suspend_time > kLongThreadSuspendThreshold) { - LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time); - } + const uint64_t end_time = NanoTime(); + const uint64_t suspend_time = end_time - start_time; + suspend_all_historam_.AdjustAndAddValue(suspend_time); + if (suspend_time > kLongThreadSuspendThreshold) { + LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time); + } - if (kDebugLocking) { - // Debug check that all threads are suspended. - AssertThreadsAreSuspended(self, self); + if (kDebugLocking) { + // Debug check that all threads are suspended. + AssertThreadsAreSuspended(self, self); + } } - - ATRACE_END(); ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str()); if (self != nullptr) { @@ -640,7 +631,8 @@ void ThreadList::ResumeAll() { } ATRACE_END(); - ATRACE_BEGIN("Resuming mutator threads"); + + ScopedTrace trace("Resuming mutator threads"); if (kDebugLocking) { // Debug check that all threads are suspended. 
@@ -672,7 +664,6 @@ void ThreadList::ResumeAll() { } Thread::resume_cond_->Broadcast(self); } - ATRACE_END(); if (self != nullptr) { VLOG(threads) << *self << " ResumeAll complete"; @@ -1117,6 +1108,7 @@ void ThreadList::UndoDebuggerSuspensions() { } void ThreadList::WaitForOtherNonDaemonThreadsToExit() { + ScopedTrace trace(__PRETTY_FUNCTION__); Thread* self = Thread::Current(); Locks::mutator_lock_->AssertNotHeld(self); while (true) { @@ -1148,6 +1140,7 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() { } void ThreadList::SuspendAllDaemonThreadsForShutdown() { + ScopedTrace trace(__PRETTY_FUNCTION__); Thread* self = Thread::Current(); MutexLock mu(self, *Locks::thread_list_lock_); size_t daemons_left = 0; diff --git a/runtime/trace.cc b/runtime/trace.cc index 99b2296b60..b8793556d8 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -19,12 +19,10 @@ #include <sys/uio.h> #include <unistd.h> -#define ATRACE_TAG ATRACE_TAG_DALVIK -#include "cutils/trace.h" - #include "art_method-inl.h" #include "base/casts.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "base/unix_file/fd_file.h" #include "class_linker.h" @@ -286,7 +284,7 @@ void* Trace::RunSamplingThread(void* arg) { while (true) { usleep(interval_us); - ATRACE_BEGIN("Profile sampling"); + ScopedTrace trace("Profile sampling"); Thread* self = Thread::Current(); Trace* the_trace; { @@ -301,7 +299,6 @@ void* Trace::RunSamplingThread(void* arg) { MutexLock mu(self, *Locks::thread_list_lock_); runtime->GetThreadList()->ForEach(GetSample, the_trace); } - ATRACE_END(); } runtime->DetachCurrentThread(); @@ -389,9 +386,10 @@ void Trace::StopTracing(bool finish_tracing, bool flush_file) { bool stop_alloc_counting = false; Runtime* const runtime = Runtime::Current(); Trace* the_trace = nullptr; + Thread* const self = Thread::Current(); pthread_t sampling_pthread = 0U; { - MutexLock mu(Thread::Current(), *Locks::trace_lock_); + MutexLock mu(self, 
*Locks::trace_lock_); if (the_trace_ == nullptr) { LOG(ERROR) << "Trace stop requested, but no trace currently running"; } else { @@ -409,6 +407,9 @@ void Trace::StopTracing(bool finish_tracing, bool flush_file) { } { + gc::ScopedGCCriticalSection gcs(self, + gc::kGcCauseInstrumentation, + gc::kCollectorTypeInstrumentation); ScopedSuspendAll ssa(__FUNCTION__); if (the_trace != nullptr) { stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0; @@ -417,7 +418,7 @@ void Trace::StopTracing(bool finish_tracing, bool flush_file) { } if (the_trace->trace_mode_ == TraceMode::kSampling) { - MutexLock mu(Thread::Current(), *Locks::thread_list_lock_); + MutexLock mu(self, *Locks::thread_list_lock_); runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr); } else { runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey); diff --git a/runtime/utf_test.cc b/runtime/utf_test.cc index c67879b427..328492523f 100644 --- a/runtime/utf_test.cc +++ b/runtime/utf_test.cc @@ -312,8 +312,8 @@ static void codePointToSurrogatePair(uint32_t code_point, uint16_t &first, uint1 } static void testConversions(uint16_t *buf, int char_count) { - char bytes_test[8], bytes_reference[8]; - uint16_t out_buf_test[4], out_buf_reference[4]; + char bytes_test[8] = { 0 }, bytes_reference[8] = { 0 }; + uint16_t out_buf_test[4] = { 0 }, out_buf_reference[4] = { 0 }; int byte_count_test, byte_count_reference; int char_count_test, char_count_reference; @@ -349,7 +349,7 @@ static void testConversions(uint16_t *buf, int char_count) { TEST_F(UtfTest, ExhaustiveBidirectionalCodePointCheck) { for (int codePoint = 0; codePoint <= 0x10ffff; ++codePoint) { - uint16_t buf[4]; + uint16_t buf[4] = { 0 }; if (codePoint <= 0xffff) { if (codePoint >= 0xd800 && codePoint <= 0xdfff) { // According to the Unicode standard, no character will ever diff --git a/runtime/utils.cc b/runtime/utils.cc index 13564a6a0f..472a85c042 100644 --- a/runtime/utils.cc +++ 
b/runtime/utils.cc @@ -1120,7 +1120,8 @@ void DumpNativeStack(std::ostream& os, pid_t tid, BacktraceMap* existing_map, co } std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid, map)); if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) { - os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n"; + os << prefix << "(backtrace::Unwind failed for thread " << tid + << ": " << backtrace->GetErrorString(backtrace->GetError()) << ")\n"; return; } else if (backtrace->NumFrames() == 0) { os << prefix << "(no native stack frames for thread " << tid << ")\n"; diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index 0c6060e4e8..4019656c93 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -23,6 +23,7 @@ #include "base/logging.h" #include "base/mutex-inl.h" #include "base/stl_util.h" +#include "base/systrace.h" #include "base/time_utils.h" #include "class_linker.h" #include "compiler_callbacks.h" @@ -269,6 +270,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(Thread* self, bool log_hard_failures, std::string* error) { DCHECK(class_def != nullptr); + ScopedTrace trace(__FUNCTION__); // A class must not be abstract and final. 
if ((class_def->access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) { diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc index 30f613c389..b171b75e97 100644 --- a/runtime/verifier/reg_type_cache.cc +++ b/runtime/verifier/reg_type_cache.cc @@ -17,6 +17,7 @@ #include "reg_type_cache-inl.h" #include "base/arena_bit_vector.h" +#include "base/bit_vector-inl.h" #include "base/casts.h" #include "base/scoped_arena_allocator.h" #include "base/stl_util.h" @@ -351,9 +352,11 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegT types.Copy(&left_merge->GetUnresolvedTypes()); left_resolved = &left_merge->GetResolvedPart(); } else if (left.IsUnresolvedTypes()) { + types.ClearAllBits(); types.SetBit(left.GetId()); left_resolved = &Zero(); } else { + types.ClearAllBits(); left_resolved = &left; } diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc index 22ac7e4ab2..42a74f88e1 100644 --- a/runtime/verifier/reg_type_test.cc +++ b/runtime/verifier/reg_type_test.cc @@ -30,23 +30,14 @@ namespace art { namespace verifier { -class BaseRegTypeTest : public CommonRuntimeTest { - public: - void PostRuntimeCreate() OVERRIDE { - stack.reset(new ArenaStack(Runtime::Current()->GetArenaPool())); - allocator.reset(new ScopedArenaAllocator(stack.get())); - } - - std::unique_ptr<ArenaStack> stack; - std::unique_ptr<ScopedArenaAllocator> allocator; -}; - -class RegTypeTest : public BaseRegTypeTest {}; +class RegTypeTest : public CommonRuntimeTest {}; TEST_F(RegTypeTest, ConstLoHi) { // Tests creating primitive types types. 
+ ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache(true, *allocator); + RegTypeCache cache(true, allocator); const RegType& ref_type_const_0 = cache.FromCat1Const(10, true); const RegType& ref_type_const_1 = cache.FromCat1Const(10, true); const RegType& ref_type_const_2 = cache.FromCat1Const(30, true); @@ -67,8 +58,10 @@ TEST_F(RegTypeTest, ConstLoHi) { } TEST_F(RegTypeTest, Pairs) { + ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache(true, *allocator); + RegTypeCache cache(true, allocator); int64_t val = static_cast<int32_t>(1234); const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true); const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true); @@ -91,8 +84,10 @@ TEST_F(RegTypeTest, Pairs) { } TEST_F(RegTypeTest, Primitives) { + ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache(true, *allocator); + RegTypeCache cache(true, allocator); const RegType& bool_reg_type = cache.Boolean(); EXPECT_FALSE(bool_reg_type.IsUndefined()); @@ -359,13 +354,15 @@ TEST_F(RegTypeTest, Primitives) { EXPECT_TRUE(double_reg_type.HasClass()); } -class RegTypeReferenceTest : public BaseRegTypeTest {}; +class RegTypeReferenceTest : public CommonRuntimeTest {}; TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) { // Tests matching precisions. A reference type that was created precise doesn't // match the one that is imprecise. 
+ ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache(true, *allocator); + RegTypeCache cache(true, allocator); const RegType& imprecise_obj = cache.JavaLangObject(false); const RegType& precise_obj = cache.JavaLangObject(true); const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true); @@ -379,8 +376,10 @@ TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) { TEST_F(RegTypeReferenceTest, UnresolvedType) { // Tests creating unresolved types. Miss for the first time asking the cache and // a hit second time. + ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache(true, *allocator); + RegTypeCache cache(true, allocator); const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true); EXPECT_TRUE(ref_type_0.IsUnresolvedReference()); EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes()); @@ -395,8 +394,10 @@ TEST_F(RegTypeReferenceTest, UnresolvedType) { TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) { // Tests creating types uninitialized types from unresolved types. + ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache(true, *allocator); + RegTypeCache cache(true, allocator); const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true); EXPECT_TRUE(ref_type_0.IsUnresolvedReference()); const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true); @@ -417,8 +418,10 @@ TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) { TEST_F(RegTypeReferenceTest, Dump) { // Tests types for proper Dump messages. 
+ ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache(true, *allocator); + RegTypeCache cache(true, allocator); const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true); const RegType& unresolved_ref_another = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true); const RegType& resolved_ref = cache.JavaLangString(); @@ -442,8 +445,10 @@ TEST_F(RegTypeReferenceTest, JavalangString) { // Add a class to the cache then look for the same class and make sure it is a // Hit the second time. Then check for the same effect when using // The JavaLangObject method instead of FromDescriptor. String class is final. + ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache(true, *allocator); + RegTypeCache cache(true, allocator); const RegType& ref_type = cache.JavaLangString(); const RegType& ref_type_2 = cache.JavaLangString(); const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true); @@ -462,8 +467,10 @@ TEST_F(RegTypeReferenceTest, JavalangObject) { // Add a class to the cache then look for the same class and make sure it is a // Hit the second time. Then I am checking for the same effect when using // The JavaLangObject method instead of FromDescriptor. Object Class in not final. 
+ ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache(true, *allocator); + RegTypeCache cache(true, allocator); const RegType& ref_type = cache.JavaLangObject(true); const RegType& ref_type_2 = cache.JavaLangObject(true); const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true); @@ -476,7 +483,9 @@ TEST_F(RegTypeReferenceTest, Merging) { // Tests merging logic // String and object , LUB is object. ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache_new(true, *allocator); + ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); + RegTypeCache cache_new(true, allocator); const RegType& string = cache_new.JavaLangString(); const RegType& Object = cache_new.JavaLangObject(true); EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject()); @@ -498,8 +507,10 @@ TEST_F(RegTypeReferenceTest, Merging) { TEST_F(RegTypeTest, MergingFloat) { // Testing merging logic with float and float constants. + ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache_new(true, *allocator); + RegTypeCache cache_new(true, allocator); constexpr int32_t kTestConstantValue = 10; const RegType& float_type = cache_new.Float(); @@ -529,8 +540,10 @@ TEST_F(RegTypeTest, MergingFloat) { TEST_F(RegTypeTest, MergingLong) { // Testing merging logic with long and long constants. 
+ ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache_new(true, *allocator); + RegTypeCache cache_new(true, allocator); constexpr int32_t kTestConstantValue = 10; const RegType& long_lo_type = cache_new.LongLo(); @@ -583,8 +596,10 @@ TEST_F(RegTypeTest, MergingLong) { TEST_F(RegTypeTest, MergingDouble) { // Testing merging logic with double and double constants. + ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache_new(true, *allocator); + RegTypeCache cache_new(true, allocator); constexpr int32_t kTestConstantValue = 10; const RegType& double_lo_type = cache_new.DoubleLo(); @@ -637,8 +652,10 @@ TEST_F(RegTypeTest, MergingDouble) { TEST_F(RegTypeTest, ConstPrecision) { // Tests creating primitive types types. + ArenaStack stack(Runtime::Current()->GetArenaPool()); + ScopedArenaAllocator allocator(&stack); ScopedObjectAccess soa(Thread::Current()); - RegTypeCache cache_new(true, *allocator); + RegTypeCache cache_new(true, allocator); const RegType& imprecise_const = cache_new.FromCat1Const(10, false); const RegType& precise_const = cache_new.FromCat1Const(10, true); diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc index cfa8329a36..d288943528 100644 --- a/runtime/well_known_classes.cc +++ b/runtime/well_known_classes.cc @@ -385,7 +385,7 @@ void WellKnownClasses::LateInit(JNIEnv* env) { ScopedLocalRef<jclass> java_lang_Runtime(env, env->FindClass("java/lang/Runtime")); java_lang_Runtime_nativeLoad = CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad", - "(Ljava/lang/String;Ljava/lang/ClassLoader;ZLjava/lang/String;Ljava/lang/String;)" + "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/String;)" "Ljava/lang/String;"); } diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java index 
a9a7a058e0..b2f905e0ee 100644 --- a/test/004-UnsafeTest/src/Main.java +++ b/test/004-UnsafeTest/src/Main.java @@ -40,7 +40,7 @@ public class Main { } private static Unsafe getUnsafe() throws Exception { - Class<?> unsafeClass = Class.forName("sun.misc.Unsafe"); + Class<?> unsafeClass = Unsafe.class; Field f = unsafeClass.getDeclaredField("theUnsafe"); f.setAccessible(true); return (Unsafe) f.get(null); diff --git a/test/004-checker-UnsafeTest18/expected.txt b/test/004-checker-UnsafeTest18/expected.txt new file mode 100644 index 0000000000..651da727af --- /dev/null +++ b/test/004-checker-UnsafeTest18/expected.txt @@ -0,0 +1,2 @@ +starting +passed diff --git a/test/004-checker-UnsafeTest18/info.txt b/test/004-checker-UnsafeTest18/info.txt new file mode 100644 index 0000000000..0fca5ebf03 --- /dev/null +++ b/test/004-checker-UnsafeTest18/info.txt @@ -0,0 +1 @@ +Test support for 1.8 sun.misc.Unsafe. diff --git a/test/004-checker-UnsafeTest18/src/Main.java b/test/004-checker-UnsafeTest18/src/Main.java new file mode 100644 index 0000000000..bb020b9b9f --- /dev/null +++ b/test/004-checker-UnsafeTest18/src/Main.java @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.Field; + +import sun.misc.Unsafe; + +/** + * Checker test on the 1.8 unsafe operations. Note, this is by no means an + * exhaustive unit test for these CAS (compare-and-swap) and fence operations. 
+ * Instead, this test ensures the methods are recognized as intrinsic and behave + * as expected. + */ +public class Main { + + private static final Unsafe unsafe = getUnsafe(); + + private static Thread[] sThreads = new Thread[10]; + + // + // Fields accessed by setters and adders. + // + + public int i = 0; + public long l = 0; + public Object o = null; + + // + // Setters. + // + + /// CHECK-START: int Main.set32(java.lang.Object, long, int) intrinsics_recognition (after) + /// CHECK-DAG: <<Result:i\d+>> InvokeVirtual intrinsic:UnsafeGetAndSetInt + /// CHECK-DAG: Return [<<Result>>] + private static int set32(Object o, long offset, int newValue) { + return unsafe.getAndSetInt(o, offset, newValue); + } + + /// CHECK-START: long Main.set64(java.lang.Object, long, long) intrinsics_recognition (after) + /// CHECK-DAG: <<Result:j\d+>> InvokeVirtual intrinsic:UnsafeGetAndSetLong + /// CHECK-DAG: Return [<<Result>>] + private static long set64(Object o, long offset, long newValue) { + return unsafe.getAndSetLong(o, offset, newValue); + } + + /// CHECK-START: java.lang.Object Main.setObj(java.lang.Object, long, java.lang.Object) intrinsics_recognition (after) + /// CHECK-DAG: <<Result:l\d+>> InvokeVirtual intrinsic:UnsafeGetAndSetObject + /// CHECK-DAG: Return [<<Result>>] + private static Object setObj(Object o, long offset, Object newValue) { + return unsafe.getAndSetObject(o, offset, newValue); + } + + // + // Adders. 
+ // + + /// CHECK-START: int Main.add32(java.lang.Object, long, int) intrinsics_recognition (after) + /// CHECK-DAG: <<Result:i\d+>> InvokeVirtual intrinsic:UnsafeGetAndAddInt + /// CHECK-DAG: Return [<<Result>>] + private static int add32(Object o, long offset, int delta) { + return unsafe.getAndAddInt(o, offset, delta); + } + + /// CHECK-START: long Main.add64(java.lang.Object, long, long) intrinsics_recognition (after) + /// CHECK-DAG: <<Result:j\d+>> InvokeVirtual intrinsic:UnsafeGetAndAddLong + /// CHECK-DAG: Return [<<Result>>] + private static long add64(Object o, long offset, long delta) { + return unsafe.getAndAddLong(o, offset, delta); + } + + // + // Fences (native). + // + + /// CHECK-START: void Main.load() intrinsics_recognition (after) + /// CHECK-DAG: InvokeVirtual intrinsic:UnsafeLoadFence + // + /// CHECK-START: void Main.load() instruction_simplifier (after) + /// CHECK-NOT: InvokeVirtual intrinsic:UnsafeLoadFence + // + /// CHECK-START: void Main.load() instruction_simplifier (after) + /// CHECK-DAG: MemoryBarrier kind:LoadAny + private static void load() { + unsafe.loadFence(); + } + + /// CHECK-START: void Main.store() intrinsics_recognition (after) + /// CHECK-DAG: InvokeVirtual intrinsic:UnsafeStoreFence + // + /// CHECK-START: void Main.store() instruction_simplifier (after) + /// CHECK-NOT: InvokeVirtual intrinsic:UnsafeStoreFence + // + /// CHECK-START: void Main.store() instruction_simplifier (after) + /// CHECK-DAG: MemoryBarrier kind:AnyStore + private static void store() { + unsafe.storeFence(); + } + + /// CHECK-START: void Main.full() intrinsics_recognition (after) + /// CHECK-DAG: InvokeVirtual intrinsic:UnsafeFullFence + // + /// CHECK-START: void Main.full() instruction_simplifier (after) + /// CHECK-NOT: InvokeVirtual intrinsic:UnsafeFullFence + // + /// CHECK-START: void Main.full() instruction_simplifier (after) + /// CHECK-DAG: MemoryBarrier kind:AnyAny + private static void full() { + unsafe.fullFence(); + } + + // + // 
Thread fork/join. + // + + private static void fork(Runnable r) { + for (int i = 0; i < 10; i++) { + sThreads[i] = new Thread(r); + sThreads[i].start(); + } + } + + private static void join() { + try { + for (int i = 0; i < 10; i++) { + sThreads[i].join(); + } + } catch (InterruptedException e) { + throw new Error("Failed join: " + e); + } + } + + // + // Driver. + // + + public static void main(String[] args) { + System.out.println("starting"); + + final Main m = new Main(); + + // Get the offsets. + + final long intOffset, longOffset, objOffset; + try { + Field intField = Main.class.getDeclaredField("i"); + Field longField = Main.class.getDeclaredField("l"); + Field objField = Main.class.getDeclaredField("o"); + + intOffset = unsafe.objectFieldOffset(intField); + longOffset = unsafe.objectFieldOffset(longField); + objOffset = unsafe.objectFieldOffset(objField); + + } catch (NoSuchFieldException e) { + throw new Error("No offset: " + e); + } + + // Some sanity within same thread. + + set32(m, intOffset, 3); + expectEquals32(3, m.i); + + set64(m, longOffset, 7L); + expectEquals64(7L, m.l); + + setObj(m, objOffset, m); + expectEqualsObj(m, m.o); + + add32(m, intOffset, 11); + expectEquals32(14, m.i); + + add64(m, longOffset, 13L); + expectEquals64(20L, m.l); + + // Some sanity on setters within different threads. + + fork(new Runnable() { + public void run() { + for (int i = 0; i < 10; i++) + set32(m, intOffset, i); + } + }); + join(); + expectEquals32(9, m.i); // one thread's last value wins + + fork(new Runnable() { + public void run() { + for (int i = 0; i < 10; i++) + set64(m, longOffset, (long) (100 + i)); + } + }); + join(); + expectEquals64(109L, m.l); // one thread's last value wins + + fork(new Runnable() { + public void run() { + for (int i = 0; i < 10; i++) + setObj(m, objOffset, sThreads[i]); + } + }); + join(); + expectEqualsObj(sThreads[9], m.o); // one thread's last value wins + + // Some sanity on adders within different threads. 
+ + fork(new Runnable() { + public void run() { + for (int i = 0; i < 10; i++) + add32(m, intOffset, i + 1); + } + }); + join(); + expectEquals32(559, m.i); // all values accounted for + + fork(new Runnable() { + public void run() { + for (int i = 0; i < 10; i++) + add64(m, longOffset, (long) (i + 1)); + } + }); + join(); + expectEquals64(659L, m.l); // all values accounted for + + // TODO: the fences + + System.out.println("passed"); + } + + // Use reflection to implement "Unsafe.getUnsafe()"; + private static Unsafe getUnsafe() { + try { + Class<?> unsafeClass = Unsafe.class; + Field f = unsafeClass.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (Unsafe) f.get(null); + } catch (Exception e) { + throw new Error("Cannot get Unsafe instance"); + } + } + + private static void expectEquals32(int expected, int result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals64(long expected, long result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEqualsObj(Object expected, Object result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } +} diff --git a/test/011-array-copy/src/Main.java b/test/011-array-copy/src/Main.java index 96e1dbf21a..d9b61e7acf 100644 --- a/test/011-array-copy/src/Main.java +++ b/test/011-array-copy/src/Main.java @@ -69,6 +69,11 @@ public class Main { array[i] = (long) i; } } + static void initCharArray(char[] array) { + for (int i = 0; i < ARRAY_SIZE; i++) { + array[i] = (char) i; + } + } /* * Perform an array copy operation on primitive arrays with different @@ -79,16 +84,19 @@ public class Main { short[] shortArray = new short[ARRAY_SIZE]; int[] intArray = new int[ARRAY_SIZE]; long[] longArray = new long[ARRAY_SIZE]; + char[] charArray = new char[ARRAY_SIZE]; initByteArray(byteArray); 
initShortArray(shortArray); initIntArray(intArray); initLongArray(longArray); + initCharArray(charArray); System.arraycopy(byteArray, srcPos, byteArray, dstPos, length); System.arraycopy(shortArray, srcPos, shortArray, dstPos, length); System.arraycopy(intArray, srcPos, intArray, dstPos, length); System.arraycopy(longArray, srcPos, longArray, dstPos, length); + System.arraycopy(charArray, srcPos, charArray, dstPos, length); for (int i = 0; i < ARRAY_SIZE; i++) { if (intArray[i] != byteArray[i]) { @@ -103,6 +111,10 @@ public class Main { System.out.println("mismatch int vs long at " + i + " : " + Arrays.toString(longArray)); break; + } else if (intArray[i] != charArray[i]) { + System.out.println("mismatch int vs char at " + i + " : " + + Arrays.toString(charArray)); + break; } } diff --git a/test/048-reflect-v8/build b/test/048-reflect-v8/build index 4ea1838465..3552b5c46c 100644 --- a/test/048-reflect-v8/build +++ b/test/048-reflect-v8/build @@ -20,9 +20,5 @@ set -e # Hard-wired use of experimental jack. # TODO: fix this temporary work-around for lambdas, see b/19467889 export USE_JACK=true -export JACK_SERVER=false -export JACK_REPOSITORY="${ANDROID_BUILD_TOP}/prebuilts/sdk/tools/jacks" -# e.g. 
/foo/bar/jack-3.10.ALPHA.jar -> 3.10.ALPHA -export JACK_VERSION="$(find "$JACK_REPOSITORY" -name '*ALPHA*' | sed 's/.*jack-//g' | sed 's/[.]jar//g')" ./default-build "$@" --experimental default-methods diff --git a/test/082-inline-execute/src/Main.java b/test/082-inline-execute/src/Main.java index 93a9005fe0..9aaed9d589 100644 --- a/test/082-inline-execute/src/Main.java +++ b/test/082-inline-execute/src/Main.java @@ -40,6 +40,10 @@ public class Main { test_Math_rint(); test_Math_round_D(); test_Math_round_F(); + test_Math_isNaN_D(); + test_Math_isNaN_F(); + test_Math_isInfinite_D(); + test_Math_isInfinite_F(); test_Short_reverseBytes(); test_Integer_reverseBytes(); test_Long_reverseBytes(); @@ -836,6 +840,106 @@ public class Main { Assert.assertEquals(Math.round(Float.NEGATIVE_INFINITY), Integer.MIN_VALUE); } + public static void test_Math_isNaN_D() { + // Quiet NaN. + Assert.assertTrue(Double.isNaN(Double.longBitsToDouble(0x7FF4000000000000l))); + Assert.assertTrue(Double.isNaN(Double.longBitsToDouble(0xFFF4000000000000l))); + // Signaling NaN. + Assert.assertTrue(Double.isNaN(Double.longBitsToDouble(0x7FF8000000000000l))); + Assert.assertTrue(Double.isNaN(Double.longBitsToDouble(0xFFF8000000000000l))); + // Distinct from +/- infinity. + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x7FF0000000000000l))); + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0xFFF0000000000000l))); + // Distinct from normal numbers. + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x7FE0000000000000l))); + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0xFFE0000000000000l))); + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x0010000000000000l))); + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x8010000000000000l))); + // Distinct from +/- zero. 
+ Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x0000000000000000l))); + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x8000000000000000l))); + // Distinct from subnormal numbers. + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x0008000000000000l))); + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x8008000000000000l))); + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x0000000000000001l))); + Assert.assertFalse(Double.isNaN(Double.longBitsToDouble(0x8000000000000001l))); + } + + public static void test_Math_isNaN_F() { + // Quiet NaN. + Assert.assertTrue(Float.isNaN(Float.intBitsToFloat(0x7FA00000))); + Assert.assertTrue(Float.isNaN(Float.intBitsToFloat(0xFFA00000))); + // Signaling NaN. + Assert.assertTrue(Float.isNaN(Float.intBitsToFloat(0x7FC00000))); + Assert.assertTrue(Float.isNaN(Float.intBitsToFloat(0xFFC00000))); + // Distinct from +/- infinity. + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x7F800000))); + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0xFF800000))); + // Distinct from normal numbers. + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x7F000000))); + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0xFF000000))); + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x00800000))); + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x80800000))); + // Distinct from +/- zero. + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x00000000))); + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x80000000))); + // Distinct from subnormal numbers. + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x00400000))); + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x80400000))); + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x00000001))); + Assert.assertFalse(Float.isNaN(Float.intBitsToFloat(0x80000001))); + } + + public static void test_Math_isInfinite_D() { + // Distinct from Quiet NaN. 
+ Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x7FF4000000000000l))); + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0xFFF4000000000000l))); + // Distinct from Signaling NaN. + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x7FF8000000000000l))); + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0xFFF8000000000000l))); + // +/- infinity. + Assert.assertTrue(Double.isInfinite(Double.longBitsToDouble(0x7FF0000000000000l))); + Assert.assertTrue(Double.isInfinite(Double.longBitsToDouble(0xFFF0000000000000l))); + // Distinct from normal numbers. + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x7FE0000000000000l))); + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0xFFE0000000000000l))); + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x0010000000000000l))); + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x8010000000000000l))); + // Distinct from +/- zero. + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x0000000000000000l))); + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x8000000000000000l))); + // Distinct from subnormal numbers. + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x0008000000000000l))); + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x8008000000000000l))); + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x0000000000000001l))); + Assert.assertFalse(Double.isInfinite(Double.longBitsToDouble(0x8000000000000001l))); + } + + public static void test_Math_isInfinite_F() { + // Distinct from Quiet NaN. + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x7FA00000))); + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0xFFA00000))); + // Distinct from Signaling NaN. + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x7FC00000))); + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0xFFC00000))); + // +/- infinity. 
+ Assert.assertTrue(Float.isInfinite(Float.intBitsToFloat(0x7F800000))); + Assert.assertTrue(Float.isInfinite(Float.intBitsToFloat(0xFF800000))); + // Distinct from normal numbers. + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x7F000000))); + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0xFF000000))); + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x00800000))); + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x80800000))); + // Distinct from +/- zero. + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x00000000))); + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x80000000))); + // Distinct from subnormal numbers. + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x00400000))); + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x80400000))); + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x00000001))); + Assert.assertFalse(Float.isInfinite(Float.intBitsToFloat(0x80000001))); + } + public static void test_StrictMath_abs_I() { StrictMath.abs(-1); Assert.assertEquals(StrictMath.abs(0), 0); diff --git a/test/137-cfi/expected.txt b/test/137-cfi/expected.txt index 6a5618ebc6..8db7853696 100644 --- a/test/137-cfi/expected.txt +++ b/test/137-cfi/expected.txt @@ -1 +1,2 @@ JNI_OnLoad called +JNI_OnLoad called diff --git a/test/137-cfi/run b/test/137-cfi/run index 8ec98c11dc..ebc729bc74 100755 --- a/test/137-cfi/run +++ b/test/137-cfi/run @@ -16,9 +16,10 @@ # Test with full DWARF debugging information. # Check full signatures of methods. -${RUN} "$@" -Xcompiler-option --generate-debug-info --args --full-signatures +${RUN} "$@" -Xcompiler-option --generate-debug-info \ + --args --full-signatures --args --test-local --args --test-remote # Test with minimal compressed debugging information. # Check only method names (parameters are omitted to save space). -# Temporarily disable due to bug 27172087 (leak/race in libunwind). 
-# ${RUN} "$@" -Xcompiler-option --generate-mini-debug-info +# Check only remote unwinding since decompression is disabled in local unwinds (b/27391690). +${RUN} "$@" -Xcompiler-option --generate-mini-debug-info --args --test-remote diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java index d60a4ebba8..5cfe33dc59 100644 --- a/test/137-cfi/src/Main.java +++ b/test/137-cfi/src/Main.java @@ -21,43 +21,48 @@ import java.util.Arrays; import java.util.Comparator; public class Main implements Comparator<Main> { - // Whether to test local unwinding. Libunwind uses linker info to find executables. As we do - // not dlopen at the moment, this doesn't work, so keep it off for now. - public final static boolean TEST_LOCAL_UNWINDING = true; + // Whether to test local unwinding. + private boolean testLocal; - // Unwinding another process, modelling debuggerd. This doesn't use the linker, so should work - // no matter whether we're using dlopen or not. - public final static boolean TEST_REMOTE_UNWINDING = true; + // Unwinding another process, modelling debuggerd. + private boolean testRemote; + // We fork ourself to create the secondary process for remote unwinding. private boolean secondary; - private boolean full_signatures; + // Expect the symbols to contain full method signatures including parameters. 
+ private boolean fullSignatures; private boolean passed; - public Main(boolean secondary, boolean full_signatures) { - this.secondary = secondary; - this.full_signatures = full_signatures; + public Main(String[] args) throws Exception { + System.loadLibrary(args[0]); + for (String arg : args) { + if (arg.equals("--test-local")) { + testLocal = true; + } + if (arg.equals("--test-remote")) { + testRemote = true; + } + if (arg.equals("--secondary")) { + secondary = true; + } + if (arg.equals("--full-signatures")) { + fullSignatures = true; + } + } + if (!testLocal && !testRemote) { + System.out.println("No test selected."); + } } public static void main(String[] args) throws Exception { - System.loadLibrary(args[0]); - boolean secondary = false; - boolean full_signatures = false; - for (String arg : args) { - if (arg.equals("--secondary")) { - secondary = true; - } - if (arg.equals("--full-signatures")) { - full_signatures = true; - } - } - new Main(secondary, full_signatures).run(); + new Main(args).run(); } private void run() { if (secondary) { - if (!TEST_REMOTE_UNWINDING) { + if (!testRemote) { throw new RuntimeException("Should not be running secondary!"); } runSecondary(); @@ -73,11 +78,11 @@ public class Main implements Comparator<Main> { private void runPrimary() { // First do the in-process unwinding. - if (TEST_LOCAL_UNWINDING && !foo()) { + if (testLocal && !foo()) { System.out.println("Unwinding self failed."); } - if (!TEST_REMOTE_UNWINDING) { + if (!testRemote) { // Skip the remote step. 
return; } @@ -105,7 +110,7 @@ public class Main implements Comparator<Main> { throw new RuntimeException(e); } - if (!unwindOtherProcess(full_signatures, pid)) { + if (!unwindOtherProcess(fullSignatures, pid)) { System.out.println("Unwinding other process failed."); } } finally { @@ -163,7 +168,7 @@ public class Main implements Comparator<Main> { if (b) { return sleep(2, b, 1.0); } else { - return unwindInProcess(full_signatures, 1, b); + return unwindInProcess(fullSignatures, 1, b); } } @@ -171,6 +176,6 @@ public class Main implements Comparator<Main> { public native boolean sleep(int i, boolean b, double dummy); - public native boolean unwindInProcess(boolean full_signatures, int i, boolean b); - public native boolean unwindOtherProcess(boolean full_signatures, int pid); + public native boolean unwindInProcess(boolean fullSignatures, int i, boolean b); + public native boolean unwindOtherProcess(boolean fullSignatures, int pid); } diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java index bcb697a396..15683b0b1e 100644 --- a/test/141-class-unload/src/Main.java +++ b/test/141-class-unload/src/Main.java @@ -181,6 +181,7 @@ public class Main { Class intHolder = loader.loadClass("IntHolder"); Method loadLibrary = intHolder.getDeclaredMethod("loadLibrary", String.class); loadLibrary.invoke(intHolder, nativeLibraryName); + waitForCompilation(intHolder); return new WeakReference(loader); } diff --git a/test/145-alloc-tracking-stress/src/Main.java b/test/145-alloc-tracking-stress/src/Main.java index 752fdd9135..418690a2a6 100644 --- a/test/145-alloc-tracking-stress/src/Main.java +++ b/test/145-alloc-tracking-stress/src/Main.java @@ -1,5 +1,4 @@ /* - * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java index 8640148795..dd4ffe45e4 100644 --- 
a/test/458-checker-instruction-simplification/src/Main.java +++ b/test/458-checker-instruction-simplification/src/Main.java @@ -1601,6 +1601,34 @@ public class Main { return (short) (value & 0x17fff); } + /// CHECK-START: int Main.intReverseCondition(int) instruction_simplifier (before) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42 + /// CHECK-DAG: <<LE:z\d+>> LessThanOrEqual [<<Const42>>,<<Arg>>] + + /// CHECK-START: int Main.intReverseCondition(int) instruction_simplifier (after) + /// CHECK-DAG: <<Arg:i\d+>> ParameterValue + /// CHECK-DAG: <<Const42:i\d+>> IntConstant 42 + /// CHECK-DAG: <<GE:z\d+>> GreaterThanOrEqual [<<Arg>>,<<Const42>>] + + public static int intReverseCondition(int i) { + return (42 > i) ? 13 : 54; + } + + /// CHECK-START: int Main.intReverseConditionNaN(int) instruction_simplifier (before) + /// CHECK-DAG: <<Const42:d\d+>> DoubleConstant 42 + /// CHECK-DAG: <<Result:d\d+>> InvokeStaticOrDirect + /// CHECK-DAG: <<CMP:i\d+>> Compare [<<Const42>>,<<Result>>] + + /// CHECK-START: int Main.intReverseConditionNaN(int) instruction_simplifier (after) + /// CHECK-DAG: <<Const42:d\d+>> DoubleConstant 42 + /// CHECK-DAG: <<Result:d\d+>> InvokeStaticOrDirect + /// CHECK-DAG: <<EQ:z\d+>> Equal [<<Result>>,<<Const42>>] + + public static int intReverseConditionNaN(int i) { + return (42 != Math.sqrt(i)) ? 
13 : 54; + } + public static int runSmaliTest(String name, boolean input) { try { Class<?> c = Class.forName("SmaliTests"); @@ -1611,7 +1639,7 @@ public class Main { } } - public static void main(String[] args) { +public static void main(String[] args) { int arg = 123456; assertLongEquals(Add0(arg), arg); @@ -1740,6 +1768,9 @@ public class Main { assertIntEquals(intAnd0x17fffToShort(Integer.MIN_VALUE), 0); assertIntEquals(intAnd0x17fffToShort(Integer.MAX_VALUE), Short.MAX_VALUE); + assertIntEquals(intReverseCondition(41), 13); + assertIntEquals(intReverseConditionNaN(-5), 13); + for (String condition : new String[] { "Equal", "NotEqual" }) { for (String constant : new String[] { "True", "False" }) { for (String side : new String[] { "Rhs", "Lhs" }) { diff --git a/test/462-checker-inlining-across-dex-files/multidex.jpp b/test/462-checker-inlining-across-dex-files/multidex.jpp new file mode 100644 index 0000000000..ae554566cb --- /dev/null +++ b/test/462-checker-inlining-across-dex-files/multidex.jpp @@ -0,0 +1,8 @@ +Main: + @@com.android.jack.annotations.ForceInMainDex + class Main + +AAA: + @@com.android.jack.annotations.ForceInMainDex + class AAA + diff --git a/test/537-checker-jump-over-jump/src/Main.java b/test/537-checker-jump-over-jump/src/Main.java index cf9a69d28e..7a58e8b1ac 100644 --- a/test/537-checker-jump-over-jump/src/Main.java +++ b/test/537-checker-jump-over-jump/src/Main.java @@ -24,7 +24,7 @@ public class Main { // /// CHECK: If /// CHECK-NEXT: cmp - /// CHECK-NEXT: jnl/ge + /// CHECK-NEXT: jle/ng // /// CHECK-DAG: <<Fibs:l\d+>> StaticFieldGet /// CHECK-DAG: NullCheck [<<Fibs>>] diff --git a/test/556-invoke-super/multidex.jpp b/test/556-invoke-super/multidex.jpp new file mode 100644 index 0000000000..fe018019e3 --- /dev/null +++ b/test/556-invoke-super/multidex.jpp @@ -0,0 +1,4 @@ +Main: + @@com.android.jack.annotations.ForceInMainDex + class Main* + diff --git a/test/563-checker-invoke-super/build b/test/563-checker-invoke-super/build index 
e06193ba78..32f84ef5ab 100755 --- a/test/563-checker-invoke-super/build +++ b/test/563-checker-invoke-super/build @@ -20,9 +20,5 @@ set -e # Hard-wired use of experimental jack. # TODO: fix this temporary work-around for lambdas, see b/19467889 export USE_JACK=true -export JACK_SERVER=false -export JACK_REPOSITORY="${ANDROID_BUILD_TOP}/prebuilts/sdk/tools/jacks" -# e.g. /foo/bar/jack-3.10.ALPHA.jar -> 3.10.ALPHA -export JACK_VERSION="$(find "$JACK_REPOSITORY" -name '*ALPHA*' | sed 's/.*jack-//g' | sed 's/[.]jar//g')" ./default-build "$@" --experimental default-methods diff --git a/test/564-checker-negbitwise/src/Main.java b/test/564-checker-negbitwise/src/Main.java index 3de7be7161..ccb8ff4fdf 100644 --- a/test/564-checker-negbitwise/src/Main.java +++ b/test/564-checker-negbitwise/src/Main.java @@ -45,7 +45,7 @@ public class Main { /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm64 (after) /// CHECK: <<Base:i\d+>> ParameterValue /// CHECK: <<Mask:i\d+>> ParameterValue - /// CHECK: <<NegOp:i\d+>> Arm64BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:And + /// CHECK: <<NegOp:i\d+>> BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:And /// CHECK: Return [<<NegOp>>] /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm64 (after) @@ -55,6 +55,27 @@ public class Main { /// CHECK-START-ARM64: int Main.$opt$noinline$notAnd(int, int) disassembly (after) /// CHECK: bic w{{\d+}}, w{{\d+}}, w{{\d+}} + + /// CHECK-START-ARM: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm (before) + /// CHECK: <<Base:i\d+>> ParameterValue + /// CHECK: <<Mask:i\d+>> ParameterValue + /// CHECK: <<Not:i\d+>> Not [<<Mask>>] + /// CHECK: <<Op:i\d+>> And [<<Base>>,<<Not>>] + /// CHECK: Return [<<Op>>] + + /// CHECK-START-ARM: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm (after) + /// CHECK: <<Base:i\d+>> ParameterValue + /// CHECK: <<Mask:i\d+>> ParameterValue + /// CHECK: <<NegOp:i\d+>> 
BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:And + /// CHECK: Return [<<NegOp>>] + + /// CHECK-START-ARM: int Main.$opt$noinline$notAnd(int, int) instruction_simplifier_arm (after) + /// CHECK-NOT: Not + /// CHECK-NOT: And + + /// CHECK-START-ARM: int Main.$opt$noinline$notAnd(int, int) disassembly (after) + /// CHECK: bic.w r{{\d+}}, r{{\d+}}, r{{\d+}} + public static int $opt$noinline$notAnd(int base, int mask) { if (doThrow) throw new Error(); return base & ~mask; @@ -74,7 +95,7 @@ public class Main { /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm64 (after) /// CHECK: <<Base:j\d+>> ParameterValue /// CHECK: <<Mask:j\d+>> ParameterValue - /// CHECK: <<NegOp:j\d+>> Arm64BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:Or + /// CHECK: <<NegOp:j\d+>> BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:Or /// CHECK: Return [<<NegOp>>] /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm64 (after) @@ -84,6 +105,27 @@ public class Main { /// CHECK-START-ARM64: long Main.$opt$noinline$notOr(long, long) disassembly (after) /// CHECK: orn x{{\d+}}, x{{\d+}}, x{{\d+}} + + /// CHECK-START-ARM: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm (before) + /// CHECK: <<Base:j\d+>> ParameterValue + /// CHECK: <<Mask:j\d+>> ParameterValue + /// CHECK: <<Not:j\d+>> Not [<<Mask>>] + /// CHECK: <<Op:j\d+>> Or [<<Base>>,<<Not>>] + /// CHECK: Return [<<Op>>] + + /// CHECK-START-ARM: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm (after) + /// CHECK: <<Base:j\d+>> ParameterValue + /// CHECK: <<Mask:j\d+>> ParameterValue + /// CHECK: <<NegOp:j\d+>> BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:Or + /// CHECK: Return [<<NegOp>>] + + /// CHECK-START-ARM: long Main.$opt$noinline$notOr(long, long) instruction_simplifier_arm (after) + /// CHECK-NOT: Not + /// CHECK-NOT: Or + + /// CHECK-START-ARM: long Main.$opt$noinline$notOr(long, long) disassembly (after) + /// CHECK: orn.w 
r{{\d+}}, r{{\d+}}, r{{\d+}} + public static long $opt$noinline$notOr(long base, long mask) { if (doThrow) throw new Error(); return base | ~mask; @@ -103,7 +145,7 @@ public class Main { /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm64 (after) /// CHECK: <<Base:i\d+>> ParameterValue /// CHECK: <<Mask:i\d+>> ParameterValue - /// CHECK: <<NegOp:i\d+>> Arm64BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:Xor + /// CHECK: <<NegOp:i\d+>> BitwiseNegatedRight [<<Base>>,<<Mask>>] kind:Xor /// CHECK: Return [<<NegOp>>] /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm64 (after) @@ -113,39 +155,63 @@ public class Main { /// CHECK-START-ARM64: int Main.$opt$noinline$notXor(int, int) disassembly (after) /// CHECK: eon w{{\d+}}, w{{\d+}}, w{{\d+}} + + /// CHECK-START-ARM: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm (before) + /// CHECK: <<Base:i\d+>> ParameterValue + /// CHECK: <<Mask:i\d+>> ParameterValue + /// CHECK: <<Not:i\d+>> Not [<<Mask>>] + /// CHECK: <<Op:i\d+>> Xor [<<Base>>,<<Not>>] + /// CHECK: Return [<<Op>>] + + /// CHECK-START-ARM: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm (after) + /// CHECK: <<Base:i\d+>> ParameterValue + /// CHECK: <<Mask:i\d+>> ParameterValue + /// CHECK: <<Not:i\d+>> Not [<<Mask>>] + /// CHECK: <<Op:i\d+>> Xor [<<Base>>,<<Not>>] + /// CHECK: Return [<<Op>>] + + /// CHECK-START-ARM: int Main.$opt$noinline$notXor(int, int) instruction_simplifier_arm (after) + /// CHECK-NOT: BitwiseNegatedRight + public static int $opt$noinline$notXor(int base, int mask) { if (doThrow) throw new Error(); return base ^ ~mask; } /** - * Check that the transformation is also done when the base is a constant. + * Check that transformation is done when the argument is a constant. 
*/ - /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) instruction_simplifier_arm64 (before) - /// CHECK: <<Mask:i\d+>> ParameterValue + /// CHECK-START-ARM64: int Main.$opt$noinline$notAndConstant(int) instruction_simplifier_arm64 (before) + /// CHECK: <<Base:i\d+>> ParameterValue /// CHECK: <<Constant:i\d+>> IntConstant - /// CHECK: <<Not:i\d+>> Not [<<Mask>>] - /// CHECK: <<Op:i\d+>> Xor [<<Not>>,<<Constant>>] + /// CHECK: <<Not:i\d+>> Not [<<Base>>] + /// CHECK: <<Op:i\d+>> And [<<Not>>,<<Constant>>] /// CHECK: Return [<<Op>>] - /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) instruction_simplifier_arm64 (after) - /// CHECK: <<Mask:i\d+>> ParameterValue + /// CHECK-START-ARM64: int Main.$opt$noinline$notAndConstant(int) instruction_simplifier_arm64 (after) + /// CHECK: <<Base:i\d+>> ParameterValue /// CHECK: <<Constant:i\d+>> IntConstant - /// CHECK: <<NegOp:i\d+>> Arm64BitwiseNegatedRight [<<Constant>>,<<Mask>>] kind:Xor + /// CHECK: <<NegOp:i\d+>> BitwiseNegatedRight [<<Constant>>,<<Base>>] kind:And /// CHECK: Return [<<NegOp>>] - /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) instruction_simplifier_arm64 (after) - /// CHECK-NOT: Not - /// CHECK-NOT: Xor - /// CHECK-START-ARM64: int Main.$opt$noinline$notXorConstant(int) disassembly (after) - /// CHECK: mov <<Reg:w\d+>>, #0xf - /// CHECK: eon w{{\d+}}, <<Reg>>, w{{\d+}} + /// CHECK-START-ARM: int Main.$opt$noinline$notAndConstant(int) instruction_simplifier_arm (before) + /// CHECK: <<Base:i\d+>> ParameterValue + /// CHECK: <<Constant:i\d+>> IntConstant + /// CHECK: <<Not:i\d+>> Not [<<Base>>] + /// CHECK: <<Op:i\d+>> And [<<Not>>,<<Constant>>] + /// CHECK: Return [<<Op>>] + + /// CHECK-START-ARM: int Main.$opt$noinline$notAndConstant(int) instruction_simplifier_arm (after) + /// CHECK: <<Base:i\d+>> ParameterValue + /// CHECK: <<Constant:i\d+>> IntConstant + /// CHECK: <<NegOp:i\d+>> BitwiseNegatedRight [<<Constant>>,<<Base>>] kind:And + /// CHECK: Return 
[<<NegOp>>] - public static int $opt$noinline$notXorConstant(int mask) { + public static int $opt$noinline$notAndConstant(int mask) { if (doThrow) throw new Error(); - return 0xf ^ ~mask; + return 0xf & ~mask; } /** @@ -173,7 +239,31 @@ public class Main { /// CHECK: Return [<<Add>>] /// CHECK-START-ARM64: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm64 (after) - /// CHECK-NOT: Arm64BitwiseNegatedRight + /// CHECK-NOT: BitwiseNegatedRight + + + /// CHECK-START-ARM: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm (before) + /// CHECK: <<Base:i\d+>> ParameterValue + /// CHECK: <<Mask:i\d+>> ParameterValue + /// CHECK: <<One:i\d+>> IntConstant + /// CHECK: <<Not:i\d+>> Not [<<Mask>>] + /// CHECK: <<Op1:i\d+>> And [<<Not>>,<<One>>] + /// CHECK: <<Op2:i\d+>> And [<<Base>>,<<Not>>] + /// CHECK: <<Add:i\d+>> Add [<<Op1>>,<<Op2>>] + /// CHECK: Return [<<Add>>] + + /// CHECK-START-ARM: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm (after) + /// CHECK: <<Base:i\d+>> ParameterValue + /// CHECK: <<Mask:i\d+>> ParameterValue + /// CHECK: <<One:i\d+>> IntConstant + /// CHECK: <<Not:i\d+>> Not [<<Mask>>] + /// CHECK: <<Op1:i\d+>> And [<<Not>>,<<One>>] + /// CHECK: <<Op2:i\d+>> And [<<Base>>,<<Not>>] + /// CHECK: <<Add:i\d+>> Add [<<Op1>>,<<Op2>>] + /// CHECK: Return [<<Add>>] + + /// CHECK-START-ARM: int Main.$opt$noinline$notAndMultipleUses(int, int) instruction_simplifier_arm (after) + /// CHECK-NOT: BitwiseNegatedRight public static int $opt$noinline$notAndMultipleUses(int base, int mask) { if (doThrow) throw new Error(); @@ -189,7 +279,10 @@ public class Main { // have been applied then Not/Not/Or is replaced by And/Not. 
/// CHECK-START-ARM64: int Main.$opt$noinline$deMorganOr(int, int) instruction_simplifier_arm64 (after) - /// CHECK-NOT: Arm64BitwiseNegatedRight + /// CHECK-NOT: BitwiseNegatedRight + + /// CHECK-START-ARM: int Main.$opt$noinline$deMorganOr(int, int) instruction_simplifier_arm (after) + /// CHECK-NOT: BitwiseNegatedRight public static int $opt$noinline$deMorganOr(int a, int b) { if (doThrow) throw new Error(); @@ -200,7 +293,7 @@ public class Main { assertIntEquals(0xe, $opt$noinline$notAnd(0xf, 0x1)); assertLongEquals(~0x0, $opt$noinline$notOr(0xf, 0x1)); assertIntEquals(~0xe, $opt$noinline$notXor(0xf, 0x1)); - assertIntEquals(~0xe, $opt$noinline$notXorConstant(0x1)); + assertIntEquals(0xe, $opt$noinline$notAndConstant(0x1)); assertIntEquals(0xe, $opt$noinline$notAndMultipleUses(0xf, 0x1)); assertIntEquals(~0x1, $opt$noinline$deMorganOr(0x3, 0x1)); } diff --git a/test/565-checker-condition-liveness/src/Main.java b/test/565-checker-condition-liveness/src/Main.java index dc4cb76258..acfcecdba8 100644 --- a/test/565-checker-condition-liveness/src/Main.java +++ b/test/565-checker-condition-liveness/src/Main.java @@ -28,10 +28,7 @@ public class Main { /// CHECK-EVAL: <<UseInput>> == <<LivSel>> + 1 public static int p(float arg) { - if (arg > 5.0f) { - return 0; - } - return -1; + return (arg > 5.0f) ? 
0 : -1; } /// CHECK-START: void Main.main(java.lang.String[]) liveness (after) diff --git a/test/569-checker-pattern-replacement/multidex.jpp b/test/569-checker-pattern-replacement/multidex.jpp new file mode 100644 index 0000000000..cfc8ad1fc9 --- /dev/null +++ b/test/569-checker-pattern-replacement/multidex.jpp @@ -0,0 +1,8 @@ +Main: + @@com.android.jack.annotations.ForceInMainDex + class Main + +BaseInMainDex: + @@com.android.jack.annotations.ForceInMainDex + class BaseInMainDex + diff --git a/test/579-inline-infinite/expected.txt b/test/579-inline-infinite/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/579-inline-infinite/expected.txt diff --git a/test/579-inline-infinite/info.txt b/test/579-inline-infinite/info.txt new file mode 100644 index 0000000000..6fb917c222 --- /dev/null +++ b/test/579-inline-infinite/info.txt @@ -0,0 +1,2 @@ +Regression test for optimizing. +Inlining of method with infinite loop cause a crash. diff --git a/test/579-inline-infinite/src/Main.java b/test/579-inline-infinite/src/Main.java new file mode 100644 index 0000000000..f214ed4ffd --- /dev/null +++ b/test/579-inline-infinite/src/Main.java @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +class Infinite implements Runnable { + public int field; + + private final void $noinline$infinite() { + while(true) { + field++; + } + } + + public void run() { + $noinline$infinite(); + } +} + +public class Main { + public static void main(String[] args) { + Thread thr = new Thread(new Infinite()); + thr.setDaemon(true); + thr.start(); + // This is a compiler test, so just finish. + } +} diff --git a/test/580-checker-round/expected.txt b/test/580-checker-round/expected.txt new file mode 100644 index 0000000000..b0aad4deb5 --- /dev/null +++ b/test/580-checker-round/expected.txt @@ -0,0 +1 @@ +passed diff --git a/test/580-checker-round/info.txt b/test/580-checker-round/info.txt new file mode 100644 index 0000000000..d6397fd13d --- /dev/null +++ b/test/580-checker-round/info.txt @@ -0,0 +1 @@ +Unit test for float/double rounding. diff --git a/test/580-checker-round/src/Main.java b/test/580-checker-round/src/Main.java new file mode 100644 index 0000000000..9e248ef95a --- /dev/null +++ b/test/580-checker-round/src/Main.java @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +public class Main { + + /// CHECK-START: int Main.round32(float) intrinsics_recognition (after) + /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:MathRoundFloat + /// CHECK-DAG: Return [<<Result>>] + private static int round32(float f) { + return Math.round(f); + } + + /// CHECK-START: long Main.round64(double) intrinsics_recognition (after) + /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect intrinsic:MathRoundDouble + /// CHECK-DAG: Return [<<Result>>] + private static long round64(double d) { + return Math.round(d); + } + + public static void main(String args[]) { + // A few obvious numbers. + expectEquals32(-2147483648, round32(Float.NEGATIVE_INFINITY)); + expectEquals32(-2, round32(-1.51f)); + expectEquals32(-1, round32(-1.2f)); + expectEquals32(-1, round32(-1.0f)); + expectEquals32(-1, round32(-0.51f)); + expectEquals32(0, round32(-0.2f)); + expectEquals32(0, round32(-0.0f)); + expectEquals32(0, round32(+0.0f)); + expectEquals32(0, round32(+0.2f)); + expectEquals32(1, round32(+0.5f)); + expectEquals32(1, round32(+1.0f)); + expectEquals32(1, round32(+1.2f)); + expectEquals32(2, round32(+1.5f)); + expectEquals32(2147483647, round32(Float.POSITIVE_INFINITY)); + + // Some others. + for (int i = -100; i <= 100; ++i) { + expectEquals32(i - 1, round32((float) i - 0.51f)); + expectEquals32(i, round32((float) i)); + expectEquals32(i + 1, round32((float) i + 0.5f)); + } + for (float f = -1.5f; f <= -1.499f; f = Math.nextAfter(f, Float.POSITIVE_INFINITY)) { + expectEquals32(-1, round32(f)); + } + + // Some harder. + float[] fvals = { + -16777215.5f, + -16777215.0f, + -0.4999f, + 0.4999f, + 16777215.0f, + 16777215.5f + }; + int[] ivals = { + -16777216, + -16777215, + 0, + 0, + 16777215, + 16777216 + }; + for (int i = 0; i < fvals.length; i++) { + expectEquals32(ivals[i], round32(fvals[i])); + } + + // A few NaN numbers. 
+ float[] fnans = { + Float.intBitsToFloat(0x7f800001), + Float.intBitsToFloat(0x7fa00000), + Float.intBitsToFloat(0x7fc00000), + Float.intBitsToFloat(0x7fffffff), + Float.intBitsToFloat(0xff800001), + Float.intBitsToFloat(0xffa00000), + Float.intBitsToFloat(0xffc00000), + Float.intBitsToFloat(0xffffffff) + }; + for (int i = 0; i < fnans.length; i++) { + expectEquals32(0, round32(fnans[i])); + } + + // A few obvious numbers. + expectEquals64(-9223372036854775808L, round64(Double.NEGATIVE_INFINITY)); + expectEquals64(-2L, round64(-1.51d)); + expectEquals64(-1L, round64(-1.2d)); + expectEquals64(-1L, round64(-1.0d)); + expectEquals64(-1L, round64(-0.51d)); + expectEquals64(0L, round64(-0.2d)); + expectEquals64(0L, round64(-0.0d)); + expectEquals64(0L, round64(+0.0d)); + expectEquals64(0L, round64(+0.2d)); + expectEquals64(1L, round64(+0.5d)); + expectEquals64(1L, round64(+1.0d)); + expectEquals64(1L, round64(+1.2d)); + expectEquals64(2L, round64(+1.5d)); + expectEquals64(9223372036854775807L, round64(Double.POSITIVE_INFINITY)); + + // Some others. + for (long l = -100; l <= 100; ++l) { + expectEquals64(l - 1, round64((double) l - 0.51d)); + expectEquals64(l + 1, round64((double) l + 0.5d)); + expectEquals64(l + 1, round64((double) l + 0.5d)); + } + for (double d = -1.5d; d <= -1.49999999999d; d = Math.nextAfter(d, Double.POSITIVE_INFINITY)) { + expectEquals64(-1L, round64(d)); + } + + // Some harder. + double[] dvals = { + -9007199254740991.5d, + -9007199254740991.0d, + -0.49999999999999994d, + 0.49999999999999994d, + 9007199254740991.0d, + 9007199254740991.5d + }; + long[] lvals = { + -9007199254740992L, + -9007199254740991L, + 0L, + 0L, + 9007199254740991L, + 9007199254740992L + }; + for (int i = 0; i < dvals.length; i++) { + expectEquals64(lvals[i], round64(dvals[i])); + } + + // A few NaN numbers. 
+ double[] dnans = { + Double.longBitsToDouble(0x7ff0000000000001L), + Double.longBitsToDouble(0x7ff4000000000000L), + Double.longBitsToDouble(0x7ff8000000000000L), + Double.longBitsToDouble(0x7fffffffffffffffL), + Double.longBitsToDouble(0xfff0000000000001L), + Double.longBitsToDouble(0xfff4000000000000L), + Double.longBitsToDouble(0xfff8000000000000L), + Double.longBitsToDouble(0xffffffffffffffffL) + }; + for (int i = 0; i < dnans.length; i++) { + expectEquals64(0L, round64(dnans[i])); + } + + System.out.println("passed"); + } + + private static void expectEquals32(int expected, int result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals64(long expected, long result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } +} diff --git a/test/580-checker-string-factory-intrinsics/expected.txt b/test/580-checker-string-factory-intrinsics/expected.txt new file mode 100644 index 0000000000..86e041dad6 --- /dev/null +++ b/test/580-checker-string-factory-intrinsics/expected.txt @@ -0,0 +1,3 @@ +foo +bar +baz diff --git a/test/580-checker-string-factory-intrinsics/info.txt b/test/580-checker-string-factory-intrinsics/info.txt new file mode 100644 index 0000000000..3d01a1964a --- /dev/null +++ b/test/580-checker-string-factory-intrinsics/info.txt @@ -0,0 +1 @@ +Ensure java.lang.StringFactory intrinsics are recognized and used. diff --git a/test/580-checker-string-factory-intrinsics/src/Main.java b/test/580-checker-string-factory-intrinsics/src/Main.java new file mode 100644 index 0000000000..a2e34bffd0 --- /dev/null +++ b/test/580-checker-string-factory-intrinsics/src/Main.java @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + /// CHECK-START: void Main.testNewStringFromBytes() builder (after) + /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromBytes intrinsic:None + + /// CHECK-START: void Main.testNewStringFromBytes() intrinsics_recognition (after) + /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromBytes intrinsic:StringNewStringFromBytes + + public static void testNewStringFromBytes() { + byte[] bytes = { 'f', 'o', 'o' }; + String s = StringFactory.newStringFromBytes(bytes, 0, 0, 3); + System.out.println(s); + } + + // The (native) method + // + // java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data) + // + // is recognized as intrinsic StringNewStringFromChars. However, + // because this method is not public, we cannot call it and check + // that the compiler actually intrinsifies it (as it does for the + // StringNewStringFromBytes and StringNewStringFromString + // intrinsics) with Checker. + // + // We can call a public method such as + // + // java.lang.StringFactory.newStringFromChars(char[] data) + // + // which contains a call to the former (non-public) native method. + // However, this call will not be inlined (because it is a method in + // another Dex file and which contains a call, which needs an + // environment), so we cannot use Checker here to ensure the native + // call was intrinsified either. 
+ + /// CHECK-START: void Main.testNewStringFromChars() builder (after) + /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None + + /// CHECK-START: void Main.testNewStringFromChars() intrinsics_recognition (after) + /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None + + /// CHECK-START: void Main.testNewStringFromChars() inliner (after) + /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromChars intrinsic:None + + public static void testNewStringFromChars() { + char[] chars = { 'b', 'a', 'r' }; + String s = StringFactory.newStringFromChars(chars); + System.out.println(s); + } + + /// CHECK-START: void Main.testNewStringFromString() builder (after) + /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromString intrinsic:None + + /// CHECK-START: void Main.testNewStringFromString() intrinsics_recognition (after) + /// CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.StringFactory.newStringFromString intrinsic:StringNewStringFromString + + public static void testNewStringFromString() { + String s1 = "baz"; + String s2 = StringFactory.newStringFromString(s1); + System.out.println(s2); + } + + public static void main(String[] args) throws Exception { + testNewStringFromBytes(); + testNewStringFromChars(); + testNewStringFromString(); + } +} diff --git a/test/581-rtp/expected.txt b/test/581-rtp/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/581-rtp/expected.txt diff --git a/test/581-rtp/info.txt b/test/581-rtp/info.txt new file mode 100644 index 0000000000..b57449ae48 --- /dev/null +++ b/test/581-rtp/info.txt @@ -0,0 +1,2 @@ +Regression test for the reference type propagation pass +of the optimizing compiler that used to break invariants. 
diff --git a/test/581-rtp/src/Main.java b/test/581-rtp/src/Main.java new file mode 100644 index 0000000000..09f6f6c096 --- /dev/null +++ b/test/581-rtp/src/Main.java @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public final class Main { + + /// CHECK-START: void Main.main(String[]) builder (after) + /// CHECK: StaticFieldGet klass:Main[] exact: true + /// CHECK: ArrayGet klass:Main exact:true + /// CHECK: BoundType klass:Main exact:true + public static void main(String[] args) { + Object o = null; + Main f = a[0]; + for (int i = 0; i < 2; ++i) { + // We used to crash in the fixed point iteration of + // the reference type propagation while handling the instanceof: + // we were expecting `o` to get the same exact-ness as the + // `HBoundType` but the typing of the `ArrayGet` used to not + // propagate the exact-ness. 
+ if (o instanceof Main) { + field = o; + } + o = f; + } + if (field != null) { + throw new Error("Expected null"); + } + } + + static Main[] a = new Main[1]; + static Object field; +} diff --git a/test/960-default-smali/build b/test/960-default-smali/build index b72afcdf18..e8f4ed084a 100755 --- a/test/960-default-smali/build +++ b/test/960-default-smali/build @@ -17,27 +17,14 @@ # make us exit on a failure set -e -# Generate the smali Main.smali file or fail -${ANDROID_BUILD_TOP}/art/test/utils/python/generate_smali_main.py ./smali - -# Should we compile with Java source code. By default we will use Smali. -USES_JAVA_SOURCE="false" -if [[ $@ == *"--jvm"* ]]; then - USES_JAVA_SOURCE="true" -elif [[ "$USE_JACK" == "true" ]]; then - if $JACK -D jack.java.source.version=1.8 >& /dev/null; then - USES_JAVA_SOURCE="true" - else - echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2 - fi +if [[ $@ != *"--jvm"* ]]; then + # Don't do anything with jvm + # Hard-wired use of experimental jack. + # TODO: fix this temporary work-around for default-methods, see b/19467889 + export USE_JACK=true fi -if [[ "$USES_JAVA_SOURCE" == "true" ]]; then - # We are compiling java code, create it. - mkdir -p src - ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src - # Ignore the smali directory. 
- EXTRA_ARGS="--no-smali" -fi +# Generate the Main.java file or fail +${ANDROID_BUILD_TOP}/art/test/utils/python/generate_java_main.py ./src -./default-build "$@" "$EXTRA_ARGS" --experimental default-methods +./default-build "$@" --experimental default-methods diff --git a/test/960-default-smali/smali/A.smali b/test/960-default-smali/smali/A.smali deleted file mode 100644 index e755612fbe..0000000000 --- a/test/960-default-smali/smali/A.smali +++ /dev/null @@ -1,38 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -.class public LA; -.super Ljava/lang/Object; -.implements LGreeter; - -# class A implements Greeter { -# public String SayHi() { -# return "Hi "; -# } -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public SayHi()Ljava/lang/String; - .registers 1 - - const-string v0, "Hi " - return-object v0 -.end method diff --git a/test/960-default-smali/smali/Attendant.smali b/test/960-default-smali/smali/Attendant.smali deleted file mode 100644 index ab63aeefcb..0000000000 --- a/test/960-default-smali/smali/Attendant.smali +++ /dev/null @@ -1,53 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. 
-# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -.class public abstract interface LAttendant; -.super Ljava/lang/Object; - -# public interface Attendant { -# public default String SayHi() { -# return "welcome to " + GetPlace(); -# } -# public default String SayHiTwice() { -# return SayHi() + SayHi(); -# } -# -# public String GetPlace(); -# } - -.method public SayHi()Ljava/lang/String; - .locals 2 - const-string v0, "welcome to " - invoke-interface {p0}, LAttendant;->GetPlace()Ljava/lang/String; - move-result-object v1 - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method - -.method public SayHiTwice()Ljava/lang/String; - .locals 2 - invoke-interface {p0}, LAttendant;->SayHi()Ljava/lang/String; - move-result-object v0 - invoke-interface {p0}, LAttendant;->SayHi()Ljava/lang/String; - move-result-object v1 - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method - -.method public abstract GetPlace()Ljava/lang/String; -.end method diff --git a/test/960-default-smali/smali/B.smali b/test/960-default-smali/smali/B.smali deleted file mode 100644 index d847dd12ff..0000000000 --- a/test/960-default-smali/smali/B.smali +++ /dev/null @@ -1,38 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. 
-# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -.class public LB; -.super Ljava/lang/Object; -.implements LGreeter2; - -# class B implements Greeter2 { -# public String SayHi() { -# return "Hello "; -# } -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public SayHi()Ljava/lang/String; - .registers 1 - - const-string v0, "Hello " - return-object v0 -.end method diff --git a/test/960-default-smali/smali/C.smali b/test/960-default-smali/smali/C.smali deleted file mode 100644 index 08a8508be1..0000000000 --- a/test/960-default-smali/smali/C.smali +++ /dev/null @@ -1,37 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -.class public LC; -.super LA; - -# class C extends A { -# public String SayHiTwice() { -# return "You don't control me"; -# } -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, LA;-><init>()V - return-void -.end method - -.method public SayHiTwice()Ljava/lang/String; - .registers 1 - - const-string v0, "You don't control me" - return-object v0 -.end method diff --git a/test/960-default-smali/smali/D.smali b/test/960-default-smali/smali/D.smali deleted file mode 100644 index 32f3b7ec8b..0000000000 --- a/test/960-default-smali/smali/D.smali +++ /dev/null @@ -1,38 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -.class public LD; -.super Ljava/lang/Object; -.implements LGreeter3; - -# class D implements Greeter3 { -# public String GetName() { -# return "Alex "; -# } -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public GetName()Ljava/lang/String; - .registers 1 - - const-string v0, "Alex " - return-object v0 -.end method diff --git a/test/960-default-smali/smali/E.smali b/test/960-default-smali/smali/E.smali deleted file mode 100644 index bae6250414..0000000000 --- a/test/960-default-smali/smali/E.smali +++ /dev/null @@ -1,38 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -.class public LE; -.super LA; -.implements LGreeter2; - -# class E extends A implements Greeter2 { -# public String SayHi() { -# return "Hi2 "; -# } -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, LA;-><init>()V - return-void -.end method - -.method public SayHi()Ljava/lang/String; - .registers 1 - - const-string v0, "Hi2 " - return-object v0 -.end method diff --git a/test/960-default-smali/smali/Extension.smali b/test/960-default-smali/smali/Extension.smali deleted file mode 100644 index 60ffa26ec6..0000000000 --- a/test/960-default-smali/smali/Extension.smali +++ /dev/null @@ -1,30 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -.class public abstract interface LExtension; -.super Ljava/lang/Object; - -# public interface Extension { -# public default String SayHi() { -# return "welcome "; -# } -# } - -.method public SayHi()Ljava/lang/String; - .locals 1 - const-string v0, "welcome " - return-object v0 -.end method diff --git a/test/960-default-smali/smali/F.smali b/test/960-default-smali/smali/F.smali deleted file mode 100644 index 3eaa089e1f..0000000000 --- a/test/960-default-smali/smali/F.smali +++ /dev/null @@ -1,47 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. 
-# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -.class public LF; -.super LA; -.implements LAttendant; - -# class F extends A implements Attendant { -# public String GetPlace() { -# return "android"; -# } -# public String SayHiTwice() { -# return "We can override both interfaces"; -# } -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public SayHiTwice()Ljava/lang/String; - .registers 1 - - const-string v0, "We can override both interfaces" - return-object v0 -.end method - -.method public GetPlace()Ljava/lang/String; - .registers 1 - const-string v0, "android" - return-object v0 -.end method diff --git a/test/960-default-smali/smali/G.smali b/test/960-default-smali/smali/G.smali deleted file mode 100644 index 446f2a4c64..0000000000 --- a/test/960-default-smali/smali/G.smali +++ /dev/null @@ -1,37 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -.class public LG; -.super Ljava/lang/Object; -.implements LAttendant; - -# class G implements Attendant { -# public String GetPlace() { -# return "android"; -# } -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public GetPlace()Ljava/lang/String; - .registers 1 - const-string v0, "android" - return-object v0 -.end method diff --git a/test/960-default-smali/smali/Greeter.smali b/test/960-default-smali/smali/Greeter.smali deleted file mode 100644 index 28530ffc6f..0000000000 --- a/test/960-default-smali/smali/Greeter.smali +++ /dev/null @@ -1,40 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -.class public abstract interface LGreeter; -.super Ljava/lang/Object; - -# public interface Greeter { -# public String SayHi(); -# -# public default String SayHiTwice() { -# return SayHi() + SayHi(); -# } -# } - -.method public abstract SayHi()Ljava/lang/String; -.end method - -.method public SayHiTwice()Ljava/lang/String; - .locals 2 - invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String; - move-result-object v0 - invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String; - move-result-object v1 - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/960-default-smali/smali/Greeter2.smali b/test/960-default-smali/smali/Greeter2.smali deleted file mode 100644 index ace1798bab..0000000000 --- a/test/960-default-smali/smali/Greeter2.smali +++ /dev/null @@ -1,39 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -.class public abstract interface LGreeter2; -.super Ljava/lang/Object; -.implements LGreeter; - -# public interface Greeter2 extends Greeter { -# public default String SayHiTwice() { -# return "I say " + SayHi() + SayHi(); -# } -# } - -.method public SayHiTwice()Ljava/lang/String; - .locals 3 - const-string v0, "I say " - invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String; - move-result-object v1 - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String; - move-result-object v1 - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/960-default-smali/smali/Greeter3.smali b/test/960-default-smali/smali/Greeter3.smali deleted file mode 100644 index 31fc2e79ff..0000000000 --- a/test/960-default-smali/smali/Greeter3.smali +++ /dev/null @@ -1,40 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -.class public abstract interface LGreeter3; -.super Ljava/lang/Object; -.implements LGreeter; - -# public interface Greeter3 extends Greeter { -# public String GetName(); -# -# public default String SayHi() { -# return "Hello " + GetName(); -# } -# } - -.method public abstract GetName()Ljava/lang/String; -.end method - -.method public SayHi()Ljava/lang/String; - .locals 2 - const-string v0, "Hello " - invoke-interface {p0}, LGreeter3;->GetName()Ljava/lang/String; - move-result-object v1 - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/960-default-smali/smali/H.smali b/test/960-default-smali/smali/H.smali deleted file mode 100644 index 82065ea49d..0000000000 --- a/test/960-default-smali/smali/H.smali +++ /dev/null @@ -1,28 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -.class public LH; -.super Ljava/lang/Object; -.implements LExtension; - -# class H implements Extension { -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method diff --git a/test/960-default-smali/smali/I.smali b/test/960-default-smali/smali/I.smali deleted file mode 100644 index 72fb58afe4..0000000000 --- a/test/960-default-smali/smali/I.smali +++ /dev/null @@ -1,28 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -.class public LI; -.super LA; -.implements LGreeter2; - -# class I extends A implements Greeter2 { -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method diff --git a/test/960-default-smali/smali/J.smali b/test/960-default-smali/smali/J.smali deleted file mode 100644 index 93f3d6231c..0000000000 --- a/test/960-default-smali/smali/J.smali +++ /dev/null @@ -1,29 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. 
-# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -.class public LJ; -.super LA; - -# class J extends A { -# } - - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, LA;-><init>()V - return-void -.end method - diff --git a/test/960-default-smali/src/A.java b/test/960-default-smali/src/A.java new file mode 100644 index 0000000000..7664a263f1 --- /dev/null +++ b/test/960-default-smali/src/A.java @@ -0,0 +1,20 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class A implements Greeter { + public String SayHi() { + return "Hi "; + } +} diff --git a/test/960-default-smali/src/Attendant.java b/test/960-default-smali/src/Attendant.java new file mode 100644 index 0000000000..9f9a58a402 --- /dev/null +++ b/test/960-default-smali/src/Attendant.java @@ -0,0 +1,24 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Attendant { + public default String SayHi() { + return "welcome to " + GetPlace(); + } + public default String SayHiTwice() { + return SayHi() + SayHi(); + } + public String GetPlace(); +} diff --git a/test/960-default-smali/src/B.java b/test/960-default-smali/src/B.java new file mode 100644 index 0000000000..18aaadea4c --- /dev/null +++ b/test/960-default-smali/src/B.java @@ -0,0 +1,20 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class B implements Greeter2 { + public String SayHi() { + return "Hello "; + } +} diff --git a/test/960-default-smali/src/C.java b/test/960-default-smali/src/C.java new file mode 100644 index 0000000000..f0bc185f95 --- /dev/null +++ b/test/960-default-smali/src/C.java @@ -0,0 +1,20 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class C extends A { + public String SayHiTwice() { + return "You don't control me"; + } +} diff --git a/test/960-default-smali/src/D.java b/test/960-default-smali/src/D.java new file mode 100644 index 0000000000..b1697cd865 --- /dev/null +++ b/test/960-default-smali/src/D.java @@ -0,0 +1,20 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class D implements Greeter3 { + public String GetName() { + return "Alex "; + } +} diff --git a/test/960-default-smali/src/E.java b/test/960-default-smali/src/E.java new file mode 100644 index 0000000000..477cb6727c --- /dev/null +++ b/test/960-default-smali/src/E.java @@ -0,0 +1,20 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class E extends A implements Greeter2 { + public String SayHi() { + return "Hi2 "; + } +} diff --git a/test/960-default-smali/src/Extension.java b/test/960-default-smali/src/Extension.java new file mode 100644 index 0000000000..89617ddab6 --- /dev/null +++ b/test/960-default-smali/src/Extension.java @@ -0,0 +1,20 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Extension { + public default String SayHi() { + return "welcome "; + } +} diff --git a/test/960-default-smali/src/F.java b/test/960-default-smali/src/F.java new file mode 100644 index 0000000000..0282de7793 --- /dev/null +++ b/test/960-default-smali/src/F.java @@ -0,0 +1,23 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class F extends A implements Attendant { + public String GetPlace() { + return "android"; + } + public String SayHiTwice() { + return "We can override both interfaces"; + } +} diff --git a/test/960-default-smali/src/G.java b/test/960-default-smali/src/G.java new file mode 100644 index 0000000000..86a140aa43 --- /dev/null +++ b/test/960-default-smali/src/G.java @@ -0,0 +1,20 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class G implements Attendant { + public String GetPlace() { + return "android"; + } +} diff --git a/test/960-default-smali/src/Greeter.java b/test/960-default-smali/src/Greeter.java new file mode 100644 index 0000000000..cee2283acd --- /dev/null +++ b/test/960-default-smali/src/Greeter.java @@ -0,0 +1,21 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Greeter { + public String SayHi(); + public default String SayHiTwice() { + return SayHi() + SayHi(); + } +} diff --git a/test/960-default-smali/src/Greeter2.java b/test/960-default-smali/src/Greeter2.java new file mode 100644 index 0000000000..07f6c53841 --- /dev/null +++ b/test/960-default-smali/src/Greeter2.java @@ -0,0 +1,20 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Greeter2 extends Greeter { + public default String SayHiTwice() { + return "I say " + SayHi() + SayHi(); + } +} diff --git a/test/960-default-smali/src/Greeter3.java b/test/960-default-smali/src/Greeter3.java new file mode 100644 index 0000000000..bbb7171a36 --- /dev/null +++ b/test/960-default-smali/src/Greeter3.java @@ -0,0 +1,21 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Greeter3 extends Greeter { + public String GetName(); + public default String SayHi() { + return "Hello " + GetName(); + } +} diff --git a/test/960-default-smali/src/H.java b/test/960-default-smali/src/H.java new file mode 100644 index 0000000000..d87a6db8f4 --- /dev/null +++ b/test/960-default-smali/src/H.java @@ -0,0 +1,16 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class H implements Extension { } diff --git a/test/960-default-smali/src/I.java b/test/960-default-smali/src/I.java new file mode 100644 index 0000000000..8d6779cd27 --- /dev/null +++ b/test/960-default-smali/src/I.java @@ -0,0 +1,16 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class I extends A implements Greeter2 { } diff --git a/test/960-default-smali/src/J.java b/test/960-default-smali/src/J.java new file mode 100644 index 0000000000..a365e406c6 --- /dev/null +++ b/test/960-default-smali/src/J.java @@ -0,0 +1,16 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +class J extends A { } diff --git a/test/960-default-smali/smali/classes.xml b/test/960-default-smali/src/classes.xml index 0aa41f7fb6..0aa41f7fb6 100644 --- a/test/960-default-smali/smali/classes.xml +++ b/test/960-default-smali/src/classes.xml diff --git a/test/961-default-iface-resolution-generated/build b/test/961-default-iface-resolution-generated/build index 005f76c2dc..ccebbe4ac9 100755 --- a/test/961-default-iface-resolution-generated/build +++ b/test/961-default-iface-resolution-generated/build @@ -26,32 +26,19 @@ restore_ulimit() { } trap 'restore_ulimit' ERR -mkdir -p ./smali - -# Generate the smali files and expected.txt or fail -./util-src/generate_smali.py ./smali ./expected.txt - -# Should we compile with Java source code. By default we will use Smali. -USES_JAVA_SOURCE="false" -if [[ $@ == *"--jvm"* ]]; then - USES_JAVA_SOURCE="true" -elif [[ $USE_JACK == "true" ]]; then - if "$JACK" -D jack.java.source.version=1.8 >& /dev/null; then - USES_JAVA_SOURCE="true" - else - echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2 - fi +if [[ $@ != *"--jvm"* ]]; then + # Don't do anything with jvm + # Hard-wired use of experimental jack. + # TODO: fix this temporary work-around for default-methods, see b/19467889 + export USE_JACK=true fi -if [[ "$USES_JAVA_SOURCE" == "true" ]]; then - # We are compiling java code, create it. - mkdir -p src - ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src - # Ignore the smali directory. 
- EXTRA_ARGS="--no-smali" -fi +mkdir -p ./src + +# Generate the smali files and expected.txt or fail +./util-src/generate_java.py ./src ./expected.txt -./default-build "$@" "$EXTRA_ARGS" --experimental default-methods +./default-build "$@" --experimental default-methods # Reset the ulimit back to its initial value restore_ulimit diff --git a/test/961-default-iface-resolution-generated/util-src/generate_smali.py b/test/961-default-iface-resolution-generated/util-src/generate_java.py index 921a096dd3..a205cd6ce0 100755 --- a/test/961-default-iface-resolution-generated/util-src/generate_smali.py +++ b/test/961-default-iface-resolution-generated/util-src/generate_java.py @@ -15,7 +15,7 @@ # limitations under the License. """ -Generate Smali test files for test 961. +Generate Java test files for test 961. """ import os @@ -43,48 +43,27 @@ import string # every possible interface tree up to 5 layers deep. MAX_IFACE_DEPTH = 5 -class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin): +class MainClass(mixins.DumpMixin, mixins.Named, mixins.JavaFileMixin): """ - A Main.smali file containing the Main class and the main function. It will run + A Main.java file containing the Main class and the main function. It will run all the test functions we have. 
""" MAIN_CLASS_TEMPLATE = """{copyright} - -.class public LMain; -.super Ljava/lang/Object; - -# class Main {{ - -.method public constructor <init>()V - .registers 1 - invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V - return-void -.end method - +class Main {{ {test_groups} - {main_func} - -# }} +}} """ MAIN_FUNCTION_TEMPLATE = """ -# public static void main(String[] args) {{ -.method public static main([Ljava/lang/String;)V - .locals 2 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - + public static void main(String[] args) {{ {test_group_invoke} - - return-void -.end method -# }} + }} """ TEST_GROUP_INVOKE_TEMPLATE = """ -# {test_name}(); - invoke-static {{}}, {test_name}()V + {test_name}(); """ def __init__(self): @@ -114,7 +93,7 @@ class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin): def __str__(self): """ - Print the MainClass smali code. + Print the MainClass java code. """ all_tests = sorted(self.tests) test_invoke = "" @@ -125,7 +104,7 @@ class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin): test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name()) main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke) - return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright("smali"), + return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright("java"), test_groups = test_groups, main_func = main_func) @@ -136,49 +115,18 @@ class Func(mixins.Named, mixins.NameComparableMixin): """ TEST_FUNCTION_TEMPLATE = """ -# public static void {fname}() {{ -# try {{ -# {farg} v = new {farg}(); -# System.out.printf("%s calls default method on %s\\n", -# v.CalledClassName(), -# v.CalledInterfaceName()); -# return; -# }} catch (Error e) {{ -# e.printStackTrace(System.out); -# return; -# }} -# }} -.method public static {fname}()V - .locals 7 - :call_{fname}_try_start - new-instance v6, L{farg}; - invoke-direct {{v6}}, L{farg};-><init>()V - - const/4 v0, 2 - new-array v1,v0, 
[Ljava/lang/Object; - const/4 v0, 0 - invoke-virtual {{v6}}, L{farg};->CalledClassName()Ljava/lang/String; - move-result-object v4 - aput-object v4,v1,v0 - - sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v3, "%s calls default method on %s\\n" - - invoke-virtual {{v6}}, L{farg};->CalledInterfaceName()Ljava/lang/String; - move-result-object v4 - const/4 v0, 1 - aput-object v4, v1, v0 - - invoke-virtual {{v2,v3,v1}}, Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream; - return-void - :call_{fname}_try_end - .catch Ljava/lang/Error; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start - :error_{fname}_start - move-exception v3 - sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream; - invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V - return-void -.end method + public static void {fname}() {{ + try {{ + {farg} v = new {farg}(); + System.out.printf("%s calls default method on %s\\n", + v.CalledClassName(), + v.CalledInterfaceName()); + return; + }} catch (Error e) {{ + e.printStackTrace(System.out); + return; + }} + }} """ def __init__(self, farg): @@ -202,38 +150,21 @@ class Func(mixins.Named, mixins.NameComparableMixin): def __str__(self): """ - Print the smali code of this function. + Print the java code of this function. """ return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(), farg=self.farg.get_name()) -class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin): +class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.JavaFileMixin): """ A class that will be instantiated to test default method resolution order. 
""" TEST_CLASS_TEMPLATE = """{copyright} - -.class public L{class_name}; -.super Ljava/lang/Object; -.implements L{iface_name}; - -# public class {class_name} implements {iface_name} {{ -# public String CalledClassName() {{ -# return "{tree}"; -# }} -# }} - -.method public constructor <init>()V - .registers 1 - invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public CalledClassName()Ljava/lang/String; - .locals 1 - const-string v0, "{tree}" - return-object v0 -.end method +public class {class_name} implements {iface_name} {{ + public String CalledClassName() {{ + return "{tree}"; + }} +}} """ def __init__(self, iface): @@ -276,46 +207,30 @@ class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixi def __str__(self): """ - Print the smali code of this class. + Print the java code of this class. """ - return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('smali'), + return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('java'), iface_name = self.iface.get_name(), tree = self.get_tree(), class_name = self.class_name) -class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin): +class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.JavaFileMixin): """ An interface that will be used to test default method resolution order. 
""" TEST_INTERFACE_TEMPLATE = """{copyright} -.class public abstract interface L{class_name}; -.super Ljava/lang/Object; -{implements_spec} - -# public interface {class_name} {extends} {ifaces} {{ -# public String CalledClassName(); -.method public abstract CalledClassName()Ljava/lang/String; -.end method +public interface {class_name} {extends} {ifaces} {{ + public String CalledClassName(); {funcs} - -# }} +}} """ DEFAULT_FUNC_TEMPLATE = """ -# public default String CalledInterfaceName() {{ -# return "{tree}"; -# }} -.method public CalledInterfaceName()Ljava/lang/String; - .locals 1 - const-string v0, "{tree}" - return-object v0 -.end method -""" - - IMPLEMENTS_TEMPLATE = """ -.implements L{iface_name}; + public default String CalledInterfaceName() {{ + return "{tree}"; + }} """ def __init__(self, ifaces, default): @@ -357,12 +272,10 @@ class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, def __str__(self): """ - Print the smali code of this interface. + Print the java code of this interface. 
""" - s_ifaces = " " j_ifaces = " " for i in self.ifaces: - s_ifaces += self.IMPLEMENTS_TEMPLATE.format(iface_name = i.get_name()) j_ifaces += " {},".format(i.get_name()) j_ifaces = j_ifaces[0:-1] if self.default: @@ -371,8 +284,7 @@ class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, class_name = self.class_name) else: funcs = "" - return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('smali'), - implements_spec = s_ifaces, + return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('java'), extends = "extends" if len(self.ifaces) else "", ifaces = j_ifaces, funcs = funcs, @@ -451,16 +363,16 @@ def create_all_test_files(): return mc, classes def main(argv): - smali_dir = Path(argv[1]) - if not smali_dir.exists() or not smali_dir.is_dir(): - print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr) + java_dir = Path(argv[1]) + if not java_dir.exists() or not java_dir.is_dir(): + print("{} is not a valid java dir".format(java_dir), file=sys.stderr) sys.exit(1) expected_txt = Path(argv[2]) mainclass, all_files = create_all_test_files() with expected_txt.open('w') as out: print(mainclass.get_expected(), file=out) for f in all_files: - f.dump(smali_dir) + f.dump(java_dir) if __name__ == '__main__': main(sys.argv) diff --git a/test/962-iface-static/build b/test/962-iface-static/build index e17272f769..0dd8573f54 100755 --- a/test/962-iface-static/build +++ b/test/962-iface-static/build @@ -17,24 +17,11 @@ # make us exit on a failure set -e -# Should we compile with Java source code. By default we will use Smali. -USES_JAVA_SOURCE="false" -if [[ $@ == *"--jvm"* ]]; then - USES_JAVA_SOURCE="true" -elif [[ "$USE_JACK" == "true" ]]; then - if $JACK -D jack.java.source.version=1.8 2>/dev/null; then - USES_JAVA_SOURCE="true" - else - echo "WARNING: Cannot use jack because it does not support JLS 1.8. 
Falling back to smali" >&2 - fi +if [[ $@ != *"--jvm"* ]]; then + # Don't do anything with jvm + # Hard-wired use of experimental jack. + # TODO: fix this temporary work-around for default-methods, see b/19467889 + export USE_JACK=true fi -if [[ "$USES_JAVA_SOURCE" == "true" ]]; then - # We are compiling java code, create it. - mkdir -p src - ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src - # Ignore the smali directory. - EXTRA_ARGS="--no-smali" -fi - -./default-build "$@" "$EXTRA_ARGS" --experimental default-methods +./default-build "$@" --experimental default-methods diff --git a/test/962-iface-static/smali/Displayer.smali b/test/962-iface-static/smali/Displayer.smali deleted file mode 100644 index ed4c013d3b..0000000000 --- a/test/962-iface-static/smali/Displayer.smali +++ /dev/null @@ -1,45 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public class Displayer { -# static { -# System.out.println("init"); -# } -# -# public Displayer() { -# System.out.println("constructor"); -# } -# } - -.class public LDisplayer; -.super Ljava/lang/Object; - -.method static constructor <clinit>()V - .locals 3 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "init" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - return-void -.end method - -.method public constructor <init>()V - .locals 2 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "constructor" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - return-void -.end method diff --git a/test/962-iface-static/smali/Main.smali b/test/962-iface-static/smali/Main.smali deleted file mode 100644 index 72fa5e0e6e..0000000000 --- a/test/962-iface-static/smali/Main.smali +++ /dev/null @@ -1,40 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# class Main { -# public static void main(String[] args) { -# System.out.println(iface.SayHi()); -# } -# } -.class public LMain; -.super Ljava/lang/Object; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public static main([Ljava/lang/String;)V - .locals 2 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - - invoke-static {}, Liface;->SayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - return-void -.end method diff --git a/test/962-iface-static/smali/iface.smali b/test/962-iface-static/smali/iface.smali deleted file mode 100644 index 5b9c03ec46..0000000000 --- a/test/962-iface-static/smali/iface.smali +++ /dev/null @@ -1,43 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public interface iface { -# public static final Displayer f = new Displayer(); -# -# public static String SayHi() { -# return "Hello"; -# } -# } - -.class public abstract interface Liface; -.super Ljava/lang/Object; - -.field public final static f:LDisplayer; - -.method static constructor <clinit>()V - .locals 3 - new-instance v1, LDisplayer; - invoke-direct {v1}, LDisplayer;-><init>()V - sput-object v1, Liface;->f:LDisplayer; - return-void -.end method - -.method public static SayHi()Ljava/lang/String; - .locals 1 - const-string v0, "Hello" - return-object v0 -.end method - diff --git a/test/962-iface-static/src/Displayer.java b/test/962-iface-static/src/Displayer.java new file mode 100644 index 0000000000..5b28b3f86d --- /dev/null +++ b/test/962-iface-static/src/Displayer.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public class Displayer { + static { + System.out.println("init"); + } + public Displayer() { + System.out.println("constructor"); + } +} diff --git a/test/962-iface-static/src/Iface.java b/test/962-iface-static/src/Iface.java new file mode 100644 index 0000000000..82c7808a31 --- /dev/null +++ b/test/962-iface-static/src/Iface.java @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Iface { + public static final Displayer f = new Displayer(); + public static String SayHi() { + return "Hello"; + } +} diff --git a/test/962-iface-static/src/Main.java b/test/962-iface-static/src/Main.java new file mode 100644 index 0000000000..7cb8eb7f7b --- /dev/null +++ b/test/962-iface-static/src/Main.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class Main { + public static void main(String[] args) { + System.out.println(Iface.SayHi()); + } +} diff --git a/test/963-default-range-smali/build b/test/963-default-range-smali/build index e17272f769..0dd8573f54 100755 --- a/test/963-default-range-smali/build +++ b/test/963-default-range-smali/build @@ -17,24 +17,11 @@ # make us exit on a failure set -e -# Should we compile with Java source code. By default we will use Smali. 
-USES_JAVA_SOURCE="false" -if [[ $@ == *"--jvm"* ]]; then - USES_JAVA_SOURCE="true" -elif [[ "$USE_JACK" == "true" ]]; then - if $JACK -D jack.java.source.version=1.8 2>/dev/null; then - USES_JAVA_SOURCE="true" - else - echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2 - fi +if [[ $@ != *"--jvm"* ]]; then + # Don't do anything with jvm + # Hard-wired use of experimental jack. + # TODO: fix this temporary work-around for default-methods, see b/19467889 + export USE_JACK=true fi -if [[ "$USES_JAVA_SOURCE" == "true" ]]; then - # We are compiling java code, create it. - mkdir -p src - ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src - # Ignore the smali directory. - EXTRA_ARGS="--no-smali" -fi - -./default-build "$@" "$EXTRA_ARGS" --experimental default-methods +./default-build "$@" --experimental default-methods diff --git a/test/963-default-range-smali/smali/A.smali b/test/963-default-range-smali/smali/A.smali deleted file mode 100644 index b3d91dd76b..0000000000 --- a/test/963-default-range-smali/smali/A.smali +++ /dev/null @@ -1,29 +0,0 @@ -# /* -# * Copyright 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ - -.class public LA; -.super Ljava/lang/Object; -.implements Liface; - -# class A implements iface { -# } - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - diff --git a/test/963-default-range-smali/smali/Main.smali b/test/963-default-range-smali/smali/Main.smali deleted file mode 100644 index 400fba72d9..0000000000 --- a/test/963-default-range-smali/smali/Main.smali +++ /dev/null @@ -1,77 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# class Main { -# public static void main(String[] args) { -# A a = new A(); -# System.out.println(a.SayHi("a string 0", -# "a string 1", -# "a string 2", -# "a string 3", -# "a string 4", -# "a string 5", -# "a string 6", -# "a string 7", -# "a string 8", -# "a string 9")); -# iface b = (iface)a; -# System.out.println(b.SayHi("a string 0", -# "a string 1", -# "a string 2", -# "a string 3", -# "a string 4", -# "a string 5", -# "a string 6", -# "a string 7", -# "a string 8", -# "a string 9")); -# } -# } -.class public LMain; -.super Ljava/lang/Object; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public static main([Ljava/lang/String;)V - .locals 15 - sget-object v12, Ljava/lang/System;->out:Ljava/io/PrintStream; - - new-instance v1, LA; - invoke-direct {v1}, LA;-><init>()V - const-string v2, "a string 0" - const-string v3, "a string 1" - const-string v4, "a string 2" - const-string v5, "a string 3" - const-string v6, "a string 4" - const-string v7, "a string 5" - const-string v8, "a string 6" - const-string v9, "a string 7" - const-string v10, "a string 8" - const-string v11, "a string 9" - invoke-virtual/range {v1 .. v11}, LA;->SayHi(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - invoke-virtual {v12,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-interface/range {v1 .. 
v11}, Liface;->SayHi(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - invoke-virtual {v12,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - return-void -.end method diff --git a/test/963-default-range-smali/smali/iface.smali b/test/963-default-range-smali/smali/iface.smali deleted file mode 100644 index c2c3ce69a7..0000000000 --- a/test/963-default-range-smali/smali/iface.smali +++ /dev/null @@ -1,40 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public interface iface { -# public default String SayHi(String n1, -# String n2, -# String n3, -# String n4, -# String n5, -# String n6, -# String n7, -# String n8, -# String n9, -# String n0) { -# return "Hello"; -# } -# } - -.class public abstract interface Liface; -.super Ljava/lang/Object; - -.method public SayHi(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String; - .locals 1 - const-string v0, "Hello" - return-object v0 -.end method - diff --git a/test/963-default-range-smali/src/A.java b/test/963-default-range-smali/src/A.java new file mode 100644 index 0000000000..617eccba49 --- /dev/null +++ b/test/963-default-range-smali/src/A.java @@ -0,0 +1,16 @@ +/* + * Copyright 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class A implements Iface { } diff --git a/test/963-default-range-smali/src/Iface.java b/test/963-default-range-smali/src/Iface.java new file mode 100644 index 0000000000..7556209b54 --- /dev/null +++ b/test/963-default-range-smali/src/Iface.java @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Iface { + public default String SayHi(String n1, + String n2, + String n3, + String n4, + String n5, + String n6, + String n7, + String n8, + String n9, + String n0) { + return "Hello"; + } +} diff --git a/test/963-default-range-smali/src/Main.java b/test/963-default-range-smali/src/Main.java new file mode 100644 index 0000000000..841842dc12 --- /dev/null +++ b/test/963-default-range-smali/src/Main.java @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +class Main { + public static void main(String[] args) { + A a = new A(); + System.out.println(a.SayHi("a string 0", + "a string 1", + "a string 2", + "a string 3", + "a string 4", + "a string 5", + "a string 6", + "a string 7", + "a string 8", + "a string 9")); + Iface b = a; + System.out.println(b.SayHi("a string 0", + "a string 1", + "a string 2", + "a string 3", + "a string 4", + "a string 5", + "a string 6", + "a string 7", + "a string 8", + "a string 9")); + } +} diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-generated/build index 0780da14e2..ccebbe4ac9 100755 --- a/test/964-default-iface-init-generated/build +++ b/test/964-default-iface-init-generated/build @@ -26,30 +26,19 @@ restore_ulimit() { } trap 'restore_ulimit' ERR -# Generate the smali files and expected.txt or fail -./util-src/generate_smali.py ./smali ./expected.txt - -# Should we compile with Java source code. By default we will use Smali. -USES_JAVA_SOURCE="false" -if [[ $@ == *"--jvm"* ]]; then - USES_JAVA_SOURCE="true" -elif [[ "$USE_JACK" == "true" ]]; then - if $JACK -D jack.java.source.version=1.8 2>/dev/null; then - USES_JAVA_SOURCE="true" - else - echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2 - fi +if [[ $@ != *"--jvm"* ]]; then + # Don't do anything with jvm + # Hard-wired use of experimental jack. + # TODO: fix this temporary work-around for default-methods, see b/19467889 + export USE_JACK=true fi -if [[ "$USES_JAVA_SOURCE" == "true" ]]; then - # We are compiling java code, create it. - mkdir -p src - ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src - # Ignore the smali directory. 
- EXTRA_ARGS="--no-smali" -fi +mkdir -p ./src + +# Generate the smali files and expected.txt or fail +./util-src/generate_java.py ./src ./expected.txt -./default-build "$@" "$EXTRA_ARGS" --experimental default-methods +./default-build "$@" --experimental default-methods # Reset the ulimit back to its initial value restore_ulimit diff --git a/test/964-default-iface-init-generated/smali/Displayer.smali b/test/964-default-iface-init-generated/smali/Displayer.smali deleted file mode 100644 index 91280a8a42..0000000000 --- a/test/964-default-iface-init-generated/smali/Displayer.smali +++ /dev/null @@ -1,45 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# -# // This class is b/c java does not allow static {} blocks in interfaces. 
-# public class Displayer { -# public Displayer(String type) { -# System.out.println("initialization of " + type); -# } -# public void touch() { -# return; -# } -# } - -.class public LDisplayer; -.super Ljava/lang/Object; - -.method public constructor <init>(Ljava/lang/String;)V - .locals 2 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - const-string v0, "initialization of " - invoke-virtual {v0, p1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - return-void -.end method - -.method public touch()V - .locals 0 - return-void -.end method - diff --git a/test/964-default-iface-init-generated/src/Displayer.java b/test/964-default-iface-init-generated/src/Displayer.java new file mode 100644 index 0000000000..4be0ab2732 --- /dev/null +++ b/test/964-default-iface-init-generated/src/Displayer.java @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// This class is b/c java does not allow static {} blocks in interfaces. 
+public class Displayer { + public Displayer(String type) { + System.out.println("initialization of " + type); + } + public void touch() { + return; + } +} diff --git a/test/964-default-iface-init-generated/util-src/generate_smali.py b/test/964-default-iface-init-generated/util-src/generate_java.py index c0ba157109..b2df49f70e 100755 --- a/test/964-default-iface-init-generated/util-src/generate_smali.py +++ b/test/964-default-iface-init-generated/util-src/generate_java.py @@ -15,7 +15,7 @@ # limitations under the License. """ -Generate Smali test files for test 964. +Generate java test files for test 964. """ import os @@ -40,47 +40,27 @@ import string # The max depth the tree can have. MAX_IFACE_DEPTH = 3 -class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin): +class MainClass(mixins.DumpMixin, mixins.Named, mixins.JavaFileMixin): """ - A Main.smali file containing the Main class and the main function. It will run + A Main.java file containing the Main class and the main function. It will run all the test functions we have. """ MAIN_CLASS_TEMPLATE = """{copyright} - -.class public LMain; -.super Ljava/lang/Object; - -# class Main {{ - -.method public constructor <init>()V - .registers 1 - invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V - return-void -.end method - +class Main {{ {test_groups} - {main_func} - -# }} +}} """ MAIN_FUNCTION_TEMPLATE = """ -# public static void main(String[] args) {{ -.method public static main([Ljava/lang/String;)V - .locals 2 - + public static void main(String[] args) {{ {test_group_invoke} - - return-void -.end method -# }} + }} """ TEST_GROUP_INVOKE_TEMPLATE = """ -# {test_name}(); - invoke-static {{}}, {test_name}()V + {test_name}(); """ def __init__(self): @@ -110,7 +90,7 @@ class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin): def __str__(self): """ - Print the smali code for this test. + Print the java code for this test. 
""" all_tests = sorted(self.tests) test_invoke = "" @@ -121,7 +101,7 @@ class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin): test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name()) main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke) - return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('smali'), + return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('java'), test_groups = test_groups, main_func = main_func) @@ -132,46 +112,19 @@ class Func(mixins.Named, mixins.NameComparableMixin): """ TEST_FUNCTION_TEMPLATE = """ -# public static void {fname}() {{ -# try {{ -# System.out.println("About to initialize {tree}"); -# {farg} v = new {farg}(); -# System.out.println("Initialized {tree}"); -# v.touchAll(); -# System.out.println("All of {tree} hierarchy initialized"); -# return; -# }} catch (Error e) {{ -# e.printStackTrace(System.out); -# return; -# }} -# }} -.method public static {fname}()V - .locals 7 - :call_{fname}_try_start - sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v3, "About to initialize {tree}" - invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - new-instance v6, L{farg}; - invoke-direct {{v6}}, L{farg};-><init>()V - - const-string v3, "Initialized {tree}" - invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-virtual {{v6}}, L{farg};->touchAll()V - - const-string v3, "All of {tree} hierarchy initialized" - invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - return-void - :call_{fname}_try_end - .catch Ljava/lang/Error; {{:call_{fname}_try_start .. 
:call_{fname}_try_end}} :error_{fname}_start - :error_{fname}_start - move-exception v3 - sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream; - invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V - return-void -.end method + public static void {fname}() {{ + try {{ + System.out.println("About to initialize {tree}"); + {farg} v = new {farg}(); + System.out.println("Initialized {tree}"); + v.touchAll(); + System.out.println("All of {tree} hierarchy initialized"); + return; + }} catch (Error e) {{ + e.printStackTrace(System.out); + return; + }} + }} """ OUTPUT_FORMAT = """ @@ -190,7 +143,7 @@ All of {tree} hierarchy initialized def __str__(self): """ - Print the smali code for this test function. + Print the java code for this test function. """ return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(), farg=self.farg.get_name(), @@ -211,57 +164,26 @@ All of {tree} hierarchy initialized initialize_output = self.farg.get_initialize_output().strip(), touch_output = self.farg.get_touch_output().strip()) -class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin): +class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.JavaFileMixin): """ A class that will be instantiated to test interface initialization order. 
""" TEST_CLASS_TEMPLATE = """{copyright} - -.class public L{class_name}; -.super Ljava/lang/Object; -{implements_spec} - -# public class {class_name} implements {ifaces} {{ -# -# public {class_name}() {{ -# }} -.method public constructor <init>()V - .locals 2 - invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V - return-void -.end method - -# public void marker() {{ -# return; -# }} -.method public marker()V - .locals 0 - return-void -.end method - -# public void touchAll() {{ -.method public touchAll()V - .locals 2 - sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream; - {touch_calls} - return-void -.end method -# }} -# }} -""" - - IMPLEMENTS_TEMPLATE = """ -.implements L{iface_name}; +public class {class_name} implements {ifaces} {{ + public void marker() {{ + return; + }} + + public void touchAll() {{ +{touch_calls} + }} +}} """ TOUCH_CALL_TEMPLATE = """ -# System.out.println("{class_name} touching {iface_name}"); -# {iface_name}.field.touch(); - const-string v1, "{class_name} touching {iface_name}" - invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - sget-object v1, L{iface_name};->field:LDisplayer; - invoke-virtual {{v1}}, LDisplayer;->touch()V + System.out.println("{class_name} touching {iface_name}"); + {iface_name}.field.touch(); """ TOUCH_OUTPUT_TEMPLATE = """ @@ -306,63 +228,32 @@ class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixi def __str__(self): """ - Print the smali code for this class. + Print the java code for this class. 
""" - s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()), - self.ifaces)) j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces)) touches = '\n'.join(map(lambda a: self.TOUCH_CALL_TEMPLATE.format(class_name = self.class_name, iface_name = a.get_name()), self.get_all_interfaces())) - return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('smali'), - implements_spec = s_ifaces, + return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('java'), ifaces = j_ifaces, class_name = self.class_name, touch_calls = touches) -class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin): +class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.JavaFileMixin): """ An interface that will be used to test default method resolution order. """ TEST_INTERFACE_TEMPLATE = """{copyright} -.class public abstract interface L{class_name}; -.super Ljava/lang/Object; -{implements_spec} - -# public interface {class_name} {extends} {ifaces} {{ -# public static final Displayer field = new Displayer("{tree}"); -.field public final static field:LDisplayer; - -.method static constructor <clinit>()V - .locals 3 - const-string v2, "{tree}" - new-instance v1, LDisplayer; - invoke-direct {{v1, v2}}, LDisplayer;-><init>(Ljava/lang/String;)V - sput-object v1, L{class_name};->field:LDisplayer; - return-void -.end method - -# public void marker(); -.method public abstract marker()V -.end method - +public interface {class_name} {extends} {ifaces} {{ + public static final Displayer field = new Displayer("{tree}"); + public void marker(); {funcs} - -# }} +}} """ DEFAULT_FUNC_TEMPLATE = """ -# public default void {class_name}_DEFAULT_FUNC() {{ -# return; -# }} -.method public {class_name}_DEFAULT_FUNC()V - .locals 0 - return-void -.end method -""" - IMPLEMENTS_TEMPLATE = """ -.implements L{iface_name}; + public default void {class_name}_DEFAULT_FUNC() {{ return; }} """ 
OUTPUT_TEMPLATE = "initialization of {tree}" @@ -429,17 +320,14 @@ class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, def __str__(self): """ - Print the smali code for this interface. + Print the java code for this interface. """ - s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()), - self.ifaces)) j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces)) if self.default: funcs = self.DEFAULT_FUNC_TEMPLATE.format(class_name = self.class_name) else: funcs = "" - return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('smali'), - implements_spec = s_ifaces, + return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('java'), extends = "extends" if len(self.ifaces) else "", ifaces = j_ifaces, funcs = funcs, @@ -516,16 +404,16 @@ def create_all_test_files(): return mc, classes def main(argv): - smali_dir = Path(argv[1]) - if not smali_dir.exists() or not smali_dir.is_dir(): - print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr) + java_dir = Path(argv[1]) + if not java_dir.exists() or not java_dir.is_dir(): + print("{} is not a valid java dir".format(java_dir), file=sys.stderr) sys.exit(1) expected_txt = Path(argv[2]) mainclass, all_files = create_all_test_files() with expected_txt.open('w') as out: print(mainclass.get_expected(), file=out) for f in all_files: - f.dump(smali_dir) + f.dump(java_dir) if __name__ == '__main__': main(sys.argv) diff --git a/test/965-default-verify/build b/test/965-default-verify/build index 5ba54380df..0dd8573f54 100755 --- a/test/965-default-verify/build +++ b/test/965-default-verify/build @@ -17,32 +17,11 @@ # make us exit on a failure set -e -# Should we compile with Java source code. By default we will use Smali. 
-USES_JAVA_SOURCE="false" -if [[ $@ == *"--jvm"* ]]; then - USES_JAVA_SOURCE="true" -elif [[ "$USE_JACK" == "true" ]]; then - if $JACK -D jack.java.source.version=1.8 2>/dev/null; then - USES_JAVA_SOURCE="true" - else - echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2 - fi +if [[ $@ != *"--jvm"* ]]; then + # Don't do anything with jvm + # Hard-wired use of experimental jack. + # TODO: fix this temporary work-around for default-methods, see b/19467889 + export USE_JACK=true fi -if [[ "$USES_JAVA_SOURCE" == "true" ]]; then - # We are compiling Java code, create it. - mkdir -p src - mkdir -p src2 - ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src - # Move build-src to src and the src copies to src2. This is needed because of - # how our default build script works and we wanted the java and smali code - # to be the same in the smali files. - for f in `find ./build-src -type f -name "*.java" | xargs -i basename \{\}`; do - mv ./src/$f ./src2/$f - mv ./build-src/$f ./src/$f - done - # Ignore the smali directory. - EXTRA_ARGS="--no-smali" -fi - -./default-build "$@" "$EXTRA_ARGS" --experimental default-methods +./default-build "$@" --experimental default-methods diff --git a/test/965-default-verify/smali/Iface.smali b/test/965-default-verify/smali/Iface.smali deleted file mode 100644 index 74799a6cf3..0000000000 --- a/test/965-default-verify/smali/Iface.smali +++ /dev/null @@ -1,40 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. 
-# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# -# public interface Iface { -# public default String sayHi() { -# return "Hello"; -# } -# -# public default void verificationSoftFail() { -# Statics.nonexistantFunction(); -# } -# } - -.class public abstract interface LIface; -.super Ljava/lang/Object; - -.method public sayHi()Ljava/lang/String; - .locals 1 - const-string v0, "Hello" - return-object v0 -.end method - -.method public verificationSoftFail()V - .locals 1 - invoke-static {}, LStatics;->nonexistantFunction()V - return-void -.end method diff --git a/test/965-default-verify/smali/Main.smali b/test/965-default-verify/smali/Main.smali deleted file mode 100644 index 8e9070692d..0000000000 --- a/test/965-default-verify/smali/Main.smali +++ /dev/null @@ -1,179 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# class Main implements Iface { -# public static void main(String[] args) { -# System.out.println("Create Main instance"); -# Main m = new Main(); -# System.out.println("Calling functions on concrete Main"); -# callMain(m); -# System.out.println("Calling functions on interface Iface"); -# callIface(m); -# } -# -# public static void callMain(Main m) { -# System.out.println("Calling verifiable function on Main"); -# System.out.println(m.sayHi()); -# System.out.println("Calling unverifiable function on Main"); -# try { -# m.verificationSoftFail(); -# System.out.println("Unexpected no error Thrown on Main"); -# } catch (NoSuchMethodError e) { -# System.out.println("Expected NSME Thrown on Main"); -# } catch (Throwable e) { -# System.out.println("Unexpected Error Thrown on Main"); -# e.printStackTrace(System.out); -# } -# System.out.println("Calling verifiable function on Main"); -# System.out.println(m.sayHi()); -# return; -# } -# -# public static void callIface(Iface m) { -# System.out.println("Calling verifiable function on Iface"); -# System.out.println(m.sayHi()); -# System.out.println("Calling unverifiable function on Iface"); -# try { -# m.verificationSoftFail(); -# System.out.println("Unexpected no error Thrown on Iface"); -# } catch (NoSuchMethodError e) { -# System.out.println("Expected NSME Thrown on Iface"); -# } catch (Throwable e) { -# System.out.println("Unexpected Error Thrown on Iface"); -# e.printStackTrace(System.out); -# } -# System.out.println("Calling verifiable function on Iface"); -# System.out.println(m.sayHi()); -# return; -# } -# } - -.class public LMain; -.super Ljava/lang/Object; -.implements LIface; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public static main([Ljava/lang/String;)V - .locals 3 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - - const-string v0, "Create Main instance" - invoke-virtual {v1,v0}, 
Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - new-instance v2, LMain; - invoke-direct {v2}, LMain;-><init>()V - - const-string v0, "Calling functions on concrete Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-static {v2}, LMain;->callMain(LMain;)V - - const-string v0, "Calling functions on interface Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-static {v2}, LMain;->callIface(LIface;)V - - return-void -.end method - -.method public static callIface(LIface;)V - .locals 3 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "Calling verifiable function on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-interface {p0}, LIface;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Calling unverifiable function on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - :try_start - invoke-interface {p0}, LIface;->verificationSoftFail()V - - const-string v0, "Unexpected no error Thrown on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - goto :error_end - :try_end - .catch Ljava/lang/NoSuchMethodError; {:try_start .. :try_end} :NSME_error_start - .catch Ljava/lang/Throwable; {:try_start .. 
:try_end} :other_error_start - :NSME_error_start - const-string v0, "Expected NSME Thrown on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :other_error_start - move-exception v2 - const-string v0, "Unexpected Error Thrown on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-virtual {v2,v1}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V - goto :error_end - :error_end - const-string v0, "Calling verifiable function on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-interface {p0}, LIface;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - return-void -.end method - -.method public static callMain(LMain;)V - .locals 3 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "Calling verifiable function on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-virtual {p0}, LMain;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Calling unverifiable function on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - :try_start - invoke-virtual {p0}, LMain;->verificationSoftFail()V - - const-string v0, "Unexpected no error Thrown on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - goto :error_end - :try_end - .catch Ljava/lang/NoSuchMethodError; {:try_start .. :try_end} :NSME_error_start - .catch Ljava/lang/Throwable; {:try_start .. 
:try_end} :other_error_start - :NSME_error_start - const-string v0, "Expected NSME Thrown on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :other_error_start - move-exception v2 - const-string v0, "Unexpected Error Thrown on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-virtual {v2,v1}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V - goto :error_end - :error_end - const-string v0, "Calling verifiable function on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-virtual {p0}, LMain;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - return-void -.end method diff --git a/test/965-default-verify/smali/Statics.smali b/test/965-default-verify/smali/Statics.smali deleted file mode 100644 index 1e8cac034a..0000000000 --- a/test/965-default-verify/smali/Statics.smali +++ /dev/null @@ -1,30 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# class Statics { -# // public static void nonexistantFunction() { -# // System.out.println("I don't exist"); -# // } -# } -# -.class public LStatics; -.super Ljava/lang/Object; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method diff --git a/test/965-default-verify/src/Iface.java b/test/965-default-verify/src/Iface.java new file mode 100644 index 0000000000..180fba2833 --- /dev/null +++ b/test/965-default-verify/src/Iface.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Iface { + public default String sayHi() { + return "Hello"; + } + public default void verificationSoftFail() { + Statics.nonexistantFunction(); + } +} diff --git a/test/965-default-verify/src/Main.java b/test/965-default-verify/src/Main.java new file mode 100644 index 0000000000..6374cb5aa0 --- /dev/null +++ b/test/965-default-verify/src/Main.java @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class Main implements Iface { + public static void main(String[] args) { + System.out.println("Create Main instance"); + Main m = new Main(); + System.out.println("Calling functions on concrete Main"); + callMain(m); + System.out.println("Calling functions on interface Iface"); + callIface(m); + } + + public static void callMain(Main m) { + System.out.println("Calling verifiable function on Main"); + System.out.println(m.sayHi()); + System.out.println("Calling unverifiable function on Main"); + try { + m.verificationSoftFail(); + System.out.println("Unexpected no error Thrown on Main"); + } catch (NoSuchMethodError e) { + System.out.println("Expected NSME Thrown on Main"); + } catch (Throwable e) { + System.out.println("Unexpected Error Thrown on Main"); + e.printStackTrace(System.out); + } + System.out.println("Calling verifiable function on Main"); + System.out.println(m.sayHi()); + return; + } + + public static void callIface(Iface m) { + System.out.println("Calling verifiable function on Iface"); + System.out.println(m.sayHi()); + System.out.println("Calling unverifiable function on Iface"); + try { + m.verificationSoftFail(); + System.out.println("Unexpected no error Thrown on Iface"); + } catch (NoSuchMethodError e) { + System.out.println("Expected NSME Thrown on Iface"); + } catch (Throwable e) { + System.out.println("Unexpected Error Thrown on Iface"); + e.printStackTrace(System.out); + } + System.out.println("Calling verifiable function on Iface"); + System.out.println(m.sayHi()); + return; + } +} diff --git 
a/test/965-default-verify/build-src/Statics.java b/test/965-default-verify/src/Statics.java index 300aeecca7..2e17ba4174 100644 --- a/test/965-default-verify/build-src/Statics.java +++ b/test/965-default-verify/src/Statics.java @@ -16,7 +16,7 @@ class Statics { public static void nonexistantFunction() { - System.out.println("I don't exist"); + System.out.println("I don't exist"); } } diff --git a/test/965-default-verify/src2/Statics.java b/test/965-default-verify/src2/Statics.java new file mode 100644 index 0000000000..7899ca9c5e --- /dev/null +++ b/test/965-default-verify/src2/Statics.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +class Statics { + // public static void nonexistantFunction() { + // System.out.println("I don't exist"); + // } +} diff --git a/test/966-default-conflict/build b/test/966-default-conflict/build index e66e8409c6..0dd8573f54 100755 --- a/test/966-default-conflict/build +++ b/test/966-default-conflict/build @@ -17,18 +17,11 @@ # make us exit on a failure set -e -# TODO: Support running with jack. 
- -if [[ $@ == *"--jvm"* ]]; then - # Build the Java files if we are running a --jvm test - mkdir -p src - mkdir -p classes - ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src - # Build with the non-conflicting version - ${JAVAC} -implicit:none -d classes src/Iface.java build-src/Iface2.java src/Main.java - rm classes/Iface2.class - # Build with the conflicting version - ${JAVAC} -implicit:none -cp classes -d classes src/Iface2.java -else - ./default-build "$@" --experimental default-methods +if [[ $@ != *"--jvm"* ]]; then + # Don't do anything with jvm + # Hard-wired use of experimental jack. + # TODO: fix this temporary work-around for default-methods, see b/19467889 + export USE_JACK=true fi + +./default-build "$@" --experimental default-methods diff --git a/test/966-default-conflict/smali/Iface.smali b/test/966-default-conflict/smali/Iface.smali deleted file mode 100644 index e996b3a4f4..0000000000 --- a/test/966-default-conflict/smali/Iface.smali +++ /dev/null @@ -1,39 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public interface Iface { -# public default String sayHi() { -# return "Hi"; -# } -# public default String charge() { -# return "CHARGE"; -# } -# } - -.class public abstract interface LIface; -.super Ljava/lang/Object; - -.method public sayHi()Ljava/lang/String; - .locals 1 - const-string v0, "Hi" - return-object v0 -.end method - -.method public charge()Ljava/lang/String; - .locals 1 - const-string v0, "CHARGE" - return-object v0 -.end method diff --git a/test/966-default-conflict/smali/Iface2.smali b/test/966-default-conflict/smali/Iface2.smali deleted file mode 100644 index 82fa547dea..0000000000 --- a/test/966-default-conflict/smali/Iface2.smali +++ /dev/null @@ -1,31 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public interface Iface2 { -# public default String sayHi() { -# return "hello"; -# } -# } - -.class public abstract interface LIface2; -.super Ljava/lang/Object; - -.method public sayHi()Ljava/lang/String; - .locals 1 - const-string v0, "hello" - return-object v0 -.end method - diff --git a/test/966-default-conflict/smali/Main.smali b/test/966-default-conflict/smali/Main.smali deleted file mode 100644 index ce974d8135..0000000000 --- a/test/966-default-conflict/smali/Main.smali +++ /dev/null @@ -1,227 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# class Main implements Iface, Iface2 { -# public static void main(String[] args) { -# System.out.println("Create Main instance"); -# Main m = new Main(); -# System.out.println("Calling functions on concrete Main"); -# callMain(m); -# System.out.println("Calling functions on interface Iface"); -# callIface(m); -# System.out.println("Calling functions on interface Iface2"); -# callIface2(m); -# } -# -# public static void callMain(Main m) { -# System.out.println("Calling non-conflicting function on Main"); -# System.out.println(m.charge()); -# System.out.println("Calling conflicting function on Main"); -# try { -# System.out.println(m.sayHi()); -# System.out.println("Unexpected no error Thrown on Main"); -# } catch (AbstractMethodError e) { -# System.out.println("Unexpected AME Thrown on Main"); -# } catch (IncompatibleClassChangeError e) { -# System.out.println("Expected ICCE Thrown on Main"); -# } -# System.out.println("Calling non-conflicting function on Main"); -# System.out.println(m.charge()); -# return; -# } -# -# public static void callIface(Iface m) { -# System.out.println("Calling non-conflicting function on Iface"); -# System.out.println(m.charge()); -# System.out.println("Calling conflicting function on Iface"); -# try { -# System.out.println(m.sayHi()); -# System.out.println("Unexpected no error Thrown on Iface"); -# } catch (AbstractMethodError e) { -# System.out.println("Unexpected AME Thrown on Iface"); -# } catch (IncompatibleClassChangeError e) { -# System.out.println("Expected ICCE Thrown on Iface"); -# } -# System.out.println("Calling non-conflicting function on Iface"); -# System.out.println(m.charge()); -# return; -# } -# -# public static void callIface2(Iface2 m) { -# System.out.println("Calling conflicting function on Iface2"); -# try { -# System.out.println(m.sayHi()); -# System.out.println("Unexpected no error Thrown on Iface2"); -# } catch (AbstractMethodError e) { -# System.out.println("Unexpected AME Thrown on Iface2"); -# } 
catch (IncompatibleClassChangeError e) { -# System.out.println("Expected ICCE Thrown on Iface2"); -# } -# return; -# } -# } - -.class public LMain; -.super Ljava/lang/Object; -.implements LIface; -.implements LIface2; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public static main([Ljava/lang/String;)V - .locals 3 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - - const-string v0, "Create Main instance" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - new-instance v2, LMain; - invoke-direct {v2}, LMain;-><init>()V - - const-string v0, "Calling functions on concrete Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-static {v2}, LMain;->callMain(LMain;)V - - const-string v0, "Calling functions on interface Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-static {v2}, LMain;->callIface(LIface;)V - - const-string v0, "Calling functions on interface Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-static {v2}, LMain;->callIface2(LIface2;)V - - return-void -.end method - -.method public static callIface(LIface;)V - .locals 2 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "Calling non-conflicting function on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-interface {p0}, LIface;->charge()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Calling conflicting function on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - :try_start - invoke-interface {p0}, LIface;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, 
Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Unexpected no error Thrown on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - goto :error_end - :try_end - .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start - .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :ICCE_error_start - :AME_error_start - const-string v0, "Unexpected AME Thrown on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :ICCE_error_start - const-string v0, "Expected ICCE Thrown on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :error_end - const-string v0, "Calling non-conflicting function on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-interface {p0}, LIface;->charge()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - return-void -.end method - -.method public static callIface2(LIface2;)V - .locals 2 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "Calling conflicting function on Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - :try_start - invoke-interface {p0}, LIface2;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Unexpected no error Thrown on Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - goto :error_end - :try_end - .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start - .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. 
:try_end} :ICCE_error_start - :AME_error_start - const-string v0, "Unexpected AME Thrown on Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :ICCE_error_start - const-string v0, "Expected ICCE Thrown on Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :error_end - - return-void -.end method - -.method public static callMain(LMain;)V - .locals 2 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "Calling non-conflicting function on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-virtual {p0}, LMain;->charge()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Calling conflicting function on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - :try_start - invoke-virtual {p0}, LMain;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Unexpected no error Thrown on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - goto :error_end - :try_end - .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start - .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. 
:try_end} :ICCE_error_start - :AME_error_start - const-string v0, "Unexpected AME Thrown on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :ICCE_error_start - const-string v0, "Expected ICCE Thrown on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :error_end - const-string v0, "Calling non-conflicting function on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-virtual {p0}, LMain;->charge()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - return-void -.end method diff --git a/test/966-default-conflict/src/Iface.java b/test/966-default-conflict/src/Iface.java new file mode 100644 index 0000000000..2131ed878d --- /dev/null +++ b/test/966-default-conflict/src/Iface.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +public interface Iface { + public default String sayHi() { + return "Hi"; + } + public default String charge() { + return "CHARGE"; + } +} diff --git a/test/966-default-conflict/build-src/Iface2.java b/test/966-default-conflict/src/Iface2.java index 8d97df8385..8d97df8385 100644 --- a/test/966-default-conflict/build-src/Iface2.java +++ b/test/966-default-conflict/src/Iface2.java diff --git a/test/966-default-conflict/src/Main.java b/test/966-default-conflict/src/Main.java new file mode 100644 index 0000000000..ce8cb47209 --- /dev/null +++ b/test/966-default-conflict/src/Main.java @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +class Main implements Iface, Iface2 { + public static void main(String[] args) { + System.out.println("Create Main instance"); + Main m = new Main(); + System.out.println("Calling functions on concrete Main"); + callMain(m); + System.out.println("Calling functions on interface Iface"); + callIface(m); + System.out.println("Calling functions on interface Iface2"); + callIface2(m); + } + public static void callMain(Main m) { + System.out.println("Calling non-conflicting function on Main"); + System.out.println(m.charge()); + System.out.println("Calling conflicting function on Main"); + try { + System.out.println(m.sayHi()); + System.out.println("Unexpected no error Thrown on Main"); + } catch (AbstractMethodError e) { + System.out.println("Unexpected AME Thrown on Main"); + } catch (IncompatibleClassChangeError e) { + System.out.println("Expected ICCE Thrown on Main"); + } + System.out.println("Calling non-conflicting function on Main"); + System.out.println(m.charge()); + return; + } + public static void callIface(Iface m) { + System.out.println("Calling non-conflicting function on Iface"); + System.out.println(m.charge()); + System.out.println("Calling conflicting function on Iface"); + try { + System.out.println(m.sayHi()); + System.out.println("Unexpected no error Thrown on Iface"); + } catch (AbstractMethodError e) { + System.out.println("Unexpected AME Thrown on Iface"); + } catch (IncompatibleClassChangeError e) { + System.out.println("Expected ICCE Thrown on Iface"); + } + System.out.println("Calling non-conflicting function on Iface"); + System.out.println(m.charge()); + return; + } + public static void callIface2(Iface2 m) { + System.out.println("Calling conflicting function on Iface2"); + try { + System.out.println(m.sayHi()); + System.out.println("Unexpected no error Thrown on Iface2"); + } catch (AbstractMethodError e) { + System.out.println("Unexpected AME Thrown on Iface2"); + } catch (IncompatibleClassChangeError e) { + 
System.out.println("Expected ICCE Thrown on Iface2"); + } + return; + } +} diff --git a/test/966-default-conflict/src2/Iface2.java b/test/966-default-conflict/src2/Iface2.java new file mode 100644 index 0000000000..d29033cd93 --- /dev/null +++ b/test/966-default-conflict/src2/Iface2.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Iface2 { + public default String sayHi() { + return "hello"; + } +} diff --git a/test/967-default-ame/build b/test/967-default-ame/build index 53001a9ad2..0dd8573f54 100755 --- a/test/967-default-ame/build +++ b/test/967-default-ame/build @@ -17,19 +17,11 @@ # make us exit on a failure set -e -# TODO: Support running with jack. - -if [[ $@ == *"--jvm"* ]]; then - # Build the Java files if we are running a --jvm test - mkdir -p src - mkdir -p classes - ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src - # Build with the non-conflicting version - ${JAVAC} -implicit:none -d classes src/Iface.java build-src/Iface2.java build-src/Iface3.java src/Main.java - rm classes/Iface2.class - rm classes/Iface3.class - # Build with the conflicting version - ${JAVAC} -implicit:none -cp classes -d classes src/Iface2.java src/Iface3.java -else - ./default-build "$@" --experimental default-methods +if [[ $@ != *"--jvm"* ]]; then + # Don't do anything with jvm + # Hard-wired use of experimental jack. 
+ # TODO: fix this temporary work-around for default-methods, see b/19467889 + export USE_JACK=true fi + +./default-build "$@" --experimental default-methods diff --git a/test/967-default-ame/smali/Iface.smali b/test/967-default-ame/smali/Iface.smali deleted file mode 100644 index e996b3a4f4..0000000000 --- a/test/967-default-ame/smali/Iface.smali +++ /dev/null @@ -1,39 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# -# public interface Iface { -# public default String sayHi() { -# return "Hi"; -# } -# public default String charge() { -# return "CHARGE"; -# } -# } - -.class public abstract interface LIface; -.super Ljava/lang/Object; - -.method public sayHi()Ljava/lang/String; - .locals 1 - const-string v0, "Hi" - return-object v0 -.end method - -.method public charge()Ljava/lang/String; - .locals 1 - const-string v0, "CHARGE" - return-object v0 -.end method diff --git a/test/967-default-ame/smali/Iface2.smali b/test/967-default-ame/smali/Iface2.smali deleted file mode 100644 index a21a8ddbc7..0000000000 --- a/test/967-default-ame/smali/Iface2.smali +++ /dev/null @@ -1,27 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. 
-# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# -# public interface Iface2 extends Iface { -# public String sayHi(); -# } - -.class public abstract interface LIface2; -.super Ljava/lang/Object; -.implements LIface; - -.method public abstract sayHi()Ljava/lang/String; -.end method - diff --git a/test/967-default-ame/smali/Iface3.smali b/test/967-default-ame/smali/Iface3.smali deleted file mode 100644 index 874e96d069..0000000000 --- a/test/967-default-ame/smali/Iface3.smali +++ /dev/null @@ -1,26 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public interface Iface3 { -# public String charge(); -# } - -.class public abstract interface LIface3; -.super Ljava/lang/Object; - -.method public abstract charge()Ljava/lang/String; -.end method - diff --git a/test/967-default-ame/smali/Main.smali b/test/967-default-ame/smali/Main.smali deleted file mode 100644 index e4d63cfa24..0000000000 --- a/test/967-default-ame/smali/Main.smali +++ /dev/null @@ -1,228 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# class Main implements Iface, Iface2, Iface3 { -# public static void main(String[] args) { -# System.out.println("Create Main instance"); -# Main m = new Main(); -# System.out.println("Calling functions on concrete Main"); -# callMain(m); -# System.out.println("Calling functions on interface Iface"); -# callIface(m); -# System.out.println("Calling functions on interface Iface2"); -# callIface2(m); -# } -# -# public static void callMain(Main m) { -# System.out.println("Calling non-abstract function on Main"); -# System.out.println(m.charge()); -# System.out.println("Calling abstract function on Main"); -# try { -# System.out.println(m.sayHi()); -# System.out.println("Unexpected no error Thrown on Main"); -# } catch (AbstractMethodError e) { -# System.out.println("Expected AME Thrown on Main"); -# } catch (IncompatibleClassChangeError e) { -# System.out.println("Unexpected ICCE Thrown on Main"); -# } -# System.out.println("Calling non-abstract function on Main"); -# System.out.println(m.charge()); -# return; -# } -# -# public static void callIface(Iface m) { -# System.out.println("Calling non-abstract function on Iface"); -# System.out.println(m.charge()); -# System.out.println("Calling abstract function on Iface"); -# try { -# System.out.println(m.sayHi()); -# System.out.println("Unexpected no error Thrown on Iface"); -# } catch (AbstractMethodError e) { -# System.out.println("Expected AME Thrown on Iface"); -# } catch (IncompatibleClassChangeError e) { -# System.out.println("Unexpected ICCE Thrown on Iface"); -# } -# System.out.println("Calling non-abstract function on Iface"); -# System.out.println(m.charge()); -# return; -# } -# -# public static void callIface2(Iface2 m) { -# System.out.println("Calling abstract function on Iface2"); -# try { -# System.out.println(m.sayHi()); -# System.out.println("Unexpected no error Thrown on Iface2"); -# } catch (AbstractMethodError e) { -# System.out.println("Expected AME Thrown on Iface2"); -# } catch 
(IncompatibleClassChangeError e) { -# System.out.println("Unexpected ICCE Thrown on Iface2"); -# } -# return; -# } -# } - -.class public LMain; -.super Ljava/lang/Object; -.implements LIface; -.implements LIface2; -.implements LIface3; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public static main([Ljava/lang/String;)V - .locals 3 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - - const-string v0, "Create Main instance" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - new-instance v2, LMain; - invoke-direct {v2}, LMain;-><init>()V - - const-string v0, "Calling functions on concrete Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-static {v2}, LMain;->callMain(LMain;)V - - const-string v0, "Calling functions on interface Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-static {v2}, LMain;->callIface(LIface;)V - - const-string v0, "Calling functions on interface Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - invoke-static {v2}, LMain;->callIface2(LIface2;)V - - return-void -.end method - -.method public static callIface(LIface;)V - .locals 2 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "Calling non-abstract function on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-interface {p0}, LIface;->charge()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Calling abstract function on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - :try_start - invoke-interface {p0}, LIface;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, 
Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Unexpected no error Thrown on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - goto :error_end - :try_end - .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start - .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. :try_end} :ICCE_error_start - :AME_error_start - const-string v0, "Expected AME Thrown on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :ICCE_error_start - const-string v0, "Unexpected ICCE Thrown on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :error_end - const-string v0, "Calling non-abstract function on Iface" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-interface {p0}, LIface;->charge()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - return-void -.end method - -.method public static callIface2(LIface2;)V - .locals 2 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "Calling abstract function on Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - :try_start - invoke-interface {p0}, LIface2;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Unexpected no error Thrown on Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - goto :error_end - :try_end - .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start - .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. 
:try_end} :ICCE_error_start - :AME_error_start - const-string v0, "Expected AME Thrown on Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :ICCE_error_start - const-string v0, "Unexpected ICCE Thrown on Iface2" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :error_end - - return-void -.end method - -.method public static callMain(LMain;)V - .locals 2 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "Calling non-abstract function on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-virtual {p0}, LMain;->charge()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Calling abstract function on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - :try_start - invoke-virtual {p0}, LMain;->sayHi()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "Unexpected no error Thrown on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - goto :error_end - :try_end - .catch Ljava/lang/AbstractMethodError; {:try_start .. :try_end} :AME_error_start - .catch Ljava/lang/IncompatibleClassChangeError; {:try_start .. 
:try_end} :ICCE_error_start - :AME_error_start - const-string v0, "Expected AME Thrown on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :ICCE_error_start - const-string v0, "Unexpected ICCE Thrown on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - goto :error_end - :error_end - const-string v0, "Calling non-abstract function on Main" - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - invoke-virtual {p0}, LMain;->charge()Ljava/lang/String; - move-result-object v0 - invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - return-void -.end method diff --git a/test/967-default-ame/src/Iface.java b/test/967-default-ame/src/Iface.java new file mode 100644 index 0000000000..2131ed878d --- /dev/null +++ b/test/967-default-ame/src/Iface.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +public interface Iface { + public default String sayHi() { + return "Hi"; + } + public default String charge() { + return "CHARGE"; + } +} diff --git a/test/967-default-ame/build-src/Iface2.java b/test/967-default-ame/src/Iface2.java index 55b2ac01b0..55b2ac01b0 100644 --- a/test/967-default-ame/build-src/Iface2.java +++ b/test/967-default-ame/src/Iface2.java diff --git a/test/967-default-ame/build-src/Iface3.java b/test/967-default-ame/src/Iface3.java index a6faa451e5..a6faa451e5 100644 --- a/test/967-default-ame/build-src/Iface3.java +++ b/test/967-default-ame/src/Iface3.java diff --git a/test/967-default-ame/src/Main.java b/test/967-default-ame/src/Main.java new file mode 100644 index 0000000000..3e48062aba --- /dev/null +++ b/test/967-default-ame/src/Main.java @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +class Main implements Iface, Iface2, Iface3 { + public static void main(String[] args) { + System.out.println("Create Main instance"); + Main m = new Main(); + System.out.println("Calling functions on concrete Main"); + callMain(m); + System.out.println("Calling functions on interface Iface"); + callIface(m); + System.out.println("Calling functions on interface Iface2"); + callIface2(m); + } + public static void callMain(Main m) { + System.out.println("Calling non-abstract function on Main"); + System.out.println(m.charge()); + System.out.println("Calling abstract function on Main"); + try { + System.out.println(m.sayHi()); + System.out.println("Unexpected no error Thrown on Main"); + } catch (AbstractMethodError e) { + System.out.println("Expected AME Thrown on Main"); + } catch (IncompatibleClassChangeError e) { + System.out.println("Unexpected ICCE Thrown on Main"); + } + System.out.println("Calling non-abstract function on Main"); + System.out.println(m.charge()); + return; + } + public static void callIface(Iface m) { + System.out.println("Calling non-abstract function on Iface"); + System.out.println(m.charge()); + System.out.println("Calling abstract function on Iface"); + try { + System.out.println(m.sayHi()); + System.out.println("Unexpected no error Thrown on Iface"); + } catch (AbstractMethodError e) { + System.out.println("Expected AME Thrown on Iface"); + } catch (IncompatibleClassChangeError e) { + System.out.println("Unexpected ICCE Thrown on Iface"); + } + System.out.println("Calling non-abstract function on Iface"); + System.out.println(m.charge()); + return; + } + public static void callIface2(Iface2 m) { + System.out.println("Calling abstract function on Iface2"); + try { + System.out.println(m.sayHi()); + System.out.println("Unexpected no error Thrown on Iface2"); + } catch (AbstractMethodError e) { + System.out.println("Expected AME Thrown on Iface2"); + } catch (IncompatibleClassChangeError e) { + System.out.println("Unexpected ICCE 
Thrown on Iface2"); + } + return; + } +} diff --git a/test/967-default-ame/src2/Iface.java b/test/967-default-ame/src2/Iface.java new file mode 100644 index 0000000000..2131ed878d --- /dev/null +++ b/test/967-default-ame/src2/Iface.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Iface { + public default String sayHi() { + return "Hi"; + } + public default String charge() { + return "CHARGE"; + } +} diff --git a/test/967-default-ame/src2/Iface2.java b/test/967-default-ame/src2/Iface2.java new file mode 100644 index 0000000000..0e4fb5f2aa --- /dev/null +++ b/test/967-default-ame/src2/Iface2.java @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +public interface Iface2 extends Iface { + public String sayHi(); +} diff --git a/test/967-default-ame/src2/Iface3.java b/test/967-default-ame/src2/Iface3.java new file mode 100644 index 0000000000..70fc33ba93 --- /dev/null +++ b/test/967-default-ame/src2/Iface3.java @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Iface3 { + public String charge(); +} diff --git a/test/969-iface-super/build b/test/969-iface-super/build index b1ef320d05..e8f4ed084a 100755 --- a/test/969-iface-super/build +++ b/test/969-iface-super/build @@ -17,27 +17,14 @@ # make us exit on a failure set -e -# Should we compile with Java source code. By default we will use Smali. -USES_JAVA_SOURCE="false" -if [[ $@ == *"--jvm"* ]]; then - USES_JAVA_SOURCE="true" -elif [[ "$USE_JACK" == "true" ]]; then - if $JACK -D jack.java.source.version=1.8 2>/dev/null; then - USES_JAVA_SOURCE="true" - else - echo "WARNING: Cannot use jack because it does not support JLS 1.8. Falling back to smali" >&2 - fi +if [[ $@ != *"--jvm"* ]]; then + # Don't do anything with jvm + # Hard-wired use of experimental jack. 
+ # TODO: fix this temporary work-around for default-methods, see b/19467889 + export USE_JACK=true fi -# Generate the smali Main.smali file or fail -${ANDROID_BUILD_TOP}/art/test/utils/python/generate_smali_main.py ./smali +# Generate the Main.java file or fail +${ANDROID_BUILD_TOP}/art/test/utils/python/generate_java_main.py ./src -if [[ "$USES_JAVA_SOURCE" == "true" ]]; then - # We are compiling java code, create it. - mkdir -p src - ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src - # Ignore the smali directory. - EXTRA_ARGS="--no-smali" -fi - -./default-build "$@" "$EXTRA_ARGS" --experimental default-methods +./default-build "$@" --experimental default-methods diff --git a/test/969-iface-super/expected.txt b/test/969-iface-super/expected.txt index f310d10e12..f7a63d6b08 100644 --- a/test/969-iface-super/expected.txt +++ b/test/969-iface-super/expected.txt @@ -1,39 +1,39 @@ Testing for type A A-virtual A.SayHi()='Hello ' -A-interface iface.SayHi()='Hello ' +A-interface Iface.SayHi()='Hello ' End testing for type A Testing for type B B-virtual B.SayHi()='Hello Hello ' -B-interface iface.SayHi()='Hello Hello ' -B-interface iface2.SayHi()='Hello Hello ' +B-interface Iface.SayHi()='Hello Hello ' +B-interface Iface2.SayHi()='Hello Hello ' End testing for type B Testing for type C C-virtual C.SayHi()='Hello and welcome ' -C-interface iface.SayHi()='Hello and welcome ' +C-interface Iface.SayHi()='Hello and welcome ' End testing for type C Testing for type D D-virtual D.SayHi()='Hello Hello and welcome ' -D-interface iface.SayHi()='Hello Hello and welcome ' -D-interface iface2.SayHi()='Hello Hello and welcome ' +D-interface Iface.SayHi()='Hello Hello and welcome ' +D-interface Iface2.SayHi()='Hello Hello and welcome ' End testing for type D Testing for type E E-virtual E.SayHi()='Hello there!' -E-interface iface.SayHi()='Hello there!' -E-interface iface3.SayHi()='Hello there!' +E-interface Iface.SayHi()='Hello there!' 
+E-interface Iface3.SayHi()='Hello there!' End testing for type E Testing for type F F-virtual E.SayHi()='Hello there!' F-virtual F.SayHi()='Hello there!' -F-interface iface.SayHi()='Hello there!' -F-interface iface3.SayHi()='Hello there!' +F-interface Iface.SayHi()='Hello there!' +F-interface Iface3.SayHi()='Hello there!' F-virtual F.SaySurprisedHi()='Hello there!!' End testing for type F Testing for type G G-virtual E.SayHi()='Hello there!?' G-virtual F.SayHi()='Hello there!?' G-virtual G.SayHi()='Hello there!?' -G-interface iface.SayHi()='Hello there!?' -G-interface iface3.SayHi()='Hello there!?' +G-interface Iface.SayHi()='Hello there!?' +G-interface Iface3.SayHi()='Hello there!?' G-virtual F.SaySurprisedHi()='Hello there!!' G-virtual G.SaySurprisedHi()='Hello there!!' G-virtual G.SayVerySurprisedHi()='Hello there!!!' @@ -42,6 +42,6 @@ Testing for type H H-virtual H.SayConfusedHi()='Hello ?!' H-virtual A.SayHi()='Hello ?' H-virtual H.SayHi()='Hello ?' -H-interface iface.SayHi()='Hello ?' +H-interface Iface.SayHi()='Hello ?' H-virtual H.SaySurprisedHi()='Hello !' End testing for type H diff --git a/test/969-iface-super/smali/A.smali b/test/969-iface-super/smali/A.smali deleted file mode 100644 index e7760a1062..0000000000 --- a/test/969-iface-super/smali/A.smali +++ /dev/null @@ -1,28 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public class A implements iface { -# } - -.class public LA; -.super Ljava/lang/Object; -.implements Liface; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method diff --git a/test/969-iface-super/smali/B.smali b/test/969-iface-super/smali/B.smali deleted file mode 100644 index e529d0534f..0000000000 --- a/test/969-iface-super/smali/B.smali +++ /dev/null @@ -1,28 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# -# public class B implements iface2 { -# } - -.class public LB; -.super Ljava/lang/Object; -.implements Liface2; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method diff --git a/test/969-iface-super/smali/C.smali b/test/969-iface-super/smali/C.smali deleted file mode 100644 index 6fbb0c4b0e..0000000000 --- a/test/969-iface-super/smali/C.smali +++ /dev/null @@ -1,41 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. 
-# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# -# public class C implements iface { -# public String SayHi() { -# return iface.super.SayHi() + " and welcome "; -# } -# } - -.class public LC; -.super Ljava/lang/Object; -.implements Liface; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public SayHi()Ljava/lang/String; - .locals 2 - invoke-super {p0}, Liface;->SayHi()Ljava/lang/String; - move-result-object v0 - const-string v1, " and welcome " - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/969-iface-super/smali/D.smali b/test/969-iface-super/smali/D.smali deleted file mode 100644 index ecd4629584..0000000000 --- a/test/969-iface-super/smali/D.smali +++ /dev/null @@ -1,41 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public class D implements iface2 { -# public String SayHi() { -# return iface2.super.SayHi() + " and welcome "; -# } -# } - -.class public LD; -.super Ljava/lang/Object; -.implements Liface2; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public SayHi()Ljava/lang/String; - .locals 2 - invoke-super {p0}, Liface2;->SayHi()Ljava/lang/String; - move-result-object v0 - const-string v1, " and welcome " - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/969-iface-super/smali/E.smali b/test/969-iface-super/smali/E.smali deleted file mode 100644 index 558aaea5d6..0000000000 --- a/test/969-iface-super/smali/E.smali +++ /dev/null @@ -1,41 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public class E implements iface3 { -# public String SayHi() { -# return iface3.super.SayHi() + " there!"; -# } -# } - -.class public LE; -.super Ljava/lang/Object; -.implements Liface3; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, Ljava/lang/Object;-><init>()V - return-void -.end method - -.method public SayHi()Ljava/lang/String; - .locals 2 - invoke-super {p0}, Liface3;->SayHi()Ljava/lang/String; - move-result-object v0 - const-string v1, " there!" - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/969-iface-super/smali/F.smali b/test/969-iface-super/smali/F.smali deleted file mode 100644 index c402d5cdc9..0000000000 --- a/test/969-iface-super/smali/F.smali +++ /dev/null @@ -1,40 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# -# public class F extends E { -# public String SaySurprisedHi() { -# return super.SayHi() + "!"; -# } -# } - -.class public LF; -.super LE; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, LE;-><init>()V - return-void -.end method - -.method public SaySurprisedHi()Ljava/lang/String; - .registers 2 - invoke-super {p0}, LE;->SayHi()Ljava/lang/String; - move-result-object v0 - const-string v1, "!" 
- invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/969-iface-super/smali/G.smali b/test/969-iface-super/smali/G.smali deleted file mode 100644 index 45705e6d86..0000000000 --- a/test/969-iface-super/smali/G.smali +++ /dev/null @@ -1,53 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# -# public class G extends F { -# public String SayHi() { -# return super.SayHi() + "?"; -# } -# public String SayVerySurprisedHi() { -# return super.SaySurprisedHi() + "!"; -# } -# } - -.class public LG; -.super LF; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, LF;-><init>()V - return-void -.end method - -.method public SayHi()Ljava/lang/String; - .locals 2 - invoke-super {p0}, LF;->SayHi()Ljava/lang/String; - move-result-object v0 - const-string v1, "?" - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method - -.method public SayVerySurprisedHi()Ljava/lang/String; - .locals 2 - invoke-super {p0}, LF;->SaySurprisedHi()Ljava/lang/String; - move-result-object v0 - const-string v1, "!" 
- invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/969-iface-super/smali/H.smali b/test/969-iface-super/smali/H.smali deleted file mode 100644 index 12f246b8c3..0000000000 --- a/test/969-iface-super/smali/H.smali +++ /dev/null @@ -1,66 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# -# public class H extends A { -# public String SayHi() { -# return super.SayHi() + "?"; -# } -# public String SaySurprisedHi() { -# return super.SayHi() + "!"; -# } -# public String SayConfusedHi() { -# return SayHi() + "!"; -# } -# } - -.class public LH; -.super LA; - -.method public constructor <init>()V - .registers 1 - invoke-direct {p0}, LA;-><init>()V - return-void -.end method - -.method public SayHi()Ljava/lang/String; - .locals 2 - invoke-super {p0}, LA;->SayHi()Ljava/lang/String; - move-result-object v0 - const-string v1, "?" - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method - -.method public SaySurprisedHi()Ljava/lang/String; - .locals 2 - invoke-super {p0}, LA;->SayHi()Ljava/lang/String; - move-result-object v0 - const-string v1, "!" 
- invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method - -.method public SayConfusedHi()Ljava/lang/String; - .locals 2 - invoke-virtual {p0}, LH;->SayHi()Ljava/lang/String; - move-result-object v0 - const-string v1, "!" - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/969-iface-super/smali/iface.smali b/test/969-iface-super/smali/iface.smali deleted file mode 100644 index 08bb93dd0c..0000000000 --- a/test/969-iface-super/smali/iface.smali +++ /dev/null @@ -1,30 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public interface iface { -# public default String SayHi() { -# return "Hello "; -# } -# } - -.class public abstract interface Liface; -.super Ljava/lang/Object; - -.method public SayHi()Ljava/lang/String; - .locals 1 - const-string v0, "Hello " - return-object v0 -.end method diff --git a/test/969-iface-super/smali/iface2.smali b/test/969-iface-super/smali/iface2.smali deleted file mode 100644 index ce6f86432d..0000000000 --- a/test/969-iface-super/smali/iface2.smali +++ /dev/null @@ -1,36 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public interface iface2 extends iface { -# public default String SayHi() { -# return iface.super.SayHi() + iface.super.SayHi(); -# } -# } - -.class public abstract interface Liface2; -.super Ljava/lang/Object; -.implements Liface; - -.method public SayHi()Ljava/lang/String; - .locals 2 - invoke-super {p0}, Liface;->SayHi()Ljava/lang/String; - move-result-object v0 - invoke-super {p0}, Liface;->SayHi()Ljava/lang/String; - move-result-object v1 - invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String; - move-result-object v0 - return-object v0 -.end method diff --git a/test/969-iface-super/smali/iface3.smali b/test/969-iface-super/smali/iface3.smali deleted file mode 100644 index bf200364ec..0000000000 --- a/test/969-iface-super/smali/iface3.smali +++ /dev/null @@ -1,22 +0,0 @@ -# /* -# * Copyright (C) 2015 The Android Open Source Project -# * -# * Licensed under the Apache License, Version 2.0 (the "License"); -# * you may not use this file except in compliance with the License. -# * You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
-# */ -# -# public interface iface3 extends iface { -# } - -.class public abstract interface Liface3; -.super Ljava/lang/Object; -.implements Liface; diff --git a/test/969-iface-super/src/A.java b/test/969-iface-super/src/A.java new file mode 100644 index 0000000000..47db14ba84 --- /dev/null +++ b/test/969-iface-super/src/A.java @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public class A implements Iface { } diff --git a/test/969-iface-super/src/B.java b/test/969-iface-super/src/B.java new file mode 100644 index 0000000000..70f63a237a --- /dev/null +++ b/test/969-iface-super/src/B.java @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +public class B implements Iface2 { } diff --git a/test/969-iface-super/src/C.java b/test/969-iface-super/src/C.java new file mode 100644 index 0000000000..0fa0b9280b --- /dev/null +++ b/test/969-iface-super/src/C.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public class C implements Iface { + public String SayHi() { + return Iface.super.SayHi() + " and welcome "; + } +} diff --git a/test/969-iface-super/src/D.java b/test/969-iface-super/src/D.java new file mode 100644 index 0000000000..8a607c3adf --- /dev/null +++ b/test/969-iface-super/src/D.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +public class D implements Iface2 { + public String SayHi() { + return Iface2.super.SayHi() + " and welcome "; + } +} diff --git a/test/969-iface-super/src/E.java b/test/969-iface-super/src/E.java new file mode 100644 index 0000000000..d5942b22b6 --- /dev/null +++ b/test/969-iface-super/src/E.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public class E implements Iface3 { + public String SayHi() { + return Iface3.super.SayHi() + " there!"; + } +} diff --git a/test/969-iface-super/src/F.java b/test/969-iface-super/src/F.java new file mode 100644 index 0000000000..610bcb158a --- /dev/null +++ b/test/969-iface-super/src/F.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +public class F extends E { + public String SaySurprisedHi() { + return super.SayHi() + "!"; + } +} diff --git a/test/969-iface-super/src/G.java b/test/969-iface-super/src/G.java new file mode 100644 index 0000000000..edaf3a9b11 --- /dev/null +++ b/test/969-iface-super/src/G.java @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public class G extends F { + public String SayHi() { + return super.SayHi() + "?"; + } + public String SayVerySurprisedHi() { + return super.SaySurprisedHi() + "!"; + } +} diff --git a/test/969-iface-super/src/H.java b/test/969-iface-super/src/H.java new file mode 100644 index 0000000000..744bda6f82 --- /dev/null +++ b/test/969-iface-super/src/H.java @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +public class H extends A { + public String SayHi() { + return super.SayHi() + "?"; + } + public String SaySurprisedHi() { + return super.SayHi() + "!"; + } + public String SayConfusedHi() { + return SayHi() + "!"; + } +} diff --git a/test/969-iface-super/src/Iface.java b/test/969-iface-super/src/Iface.java new file mode 100644 index 0000000000..ece5e592de --- /dev/null +++ b/test/969-iface-super/src/Iface.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Iface { + public default String SayHi() { + return "Hello "; + } +} diff --git a/test/969-iface-super/src/Iface2.java b/test/969-iface-super/src/Iface2.java new file mode 100644 index 0000000000..d74ee6ddf5 --- /dev/null +++ b/test/969-iface-super/src/Iface2.java @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +public interface Iface2 extends Iface { + public default String SayHi() { + return Iface.super.SayHi() + Iface.super.SayHi(); + } +} diff --git a/test/969-iface-super/src/Iface3.java b/test/969-iface-super/src/Iface3.java new file mode 100644 index 0000000000..10b010cb3b --- /dev/null +++ b/test/969-iface-super/src/Iface3.java @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +public interface Iface3 extends Iface { } diff --git a/test/969-iface-super/smali/classes.xml b/test/969-iface-super/src/classes.xml index 4d205bd606..4c3dae4fa0 100644 --- a/test/969-iface-super/smali/classes.xml +++ b/test/969-iface-super/src/classes.xml @@ -18,35 +18,35 @@ <classes> <class name="A" super="java/lang/Object"> <implements> - <item>iface</item> + <item>Iface</item> </implements> <methods> </methods> </class> <class name="B" super="java/lang/Object"> <implements> - <item>iface2</item> + <item>Iface2</item> </implements> <methods> </methods> </class> <class name="C" super="java/lang/Object"> <implements> - <item>iface</item> + <item>Iface</item> </implements> <methods> </methods> </class> <class name="D" super="java/lang/Object"> <implements> - <item>iface2</item> + <item>Iface2</item> </implements> <methods> </methods> </class> <class name="E" super="java/lang/Object"> <implements> - <item>iface3</item> + <item>Iface3</item> </implements> <methods> </methods> </class> @@ -75,23 +75,23 
@@ </classes> <interfaces> - <interface name="iface" super="java/lang/Object"> + <interface name="Iface" super="java/lang/Object"> <implements> </implements> <methods> <item>SayHi</item> </methods> </interface> - <interface name="iface2" super="java/lang/Object"> + <interface name="Iface2" super="java/lang/Object"> <implements> - <item>iface</item> + <item>Iface</item> </implements> <methods> </methods> </interface> - <interface name="iface3" super="java/lang/Object"> + <interface name="Iface3" super="java/lang/Object"> <implements> - <item>iface</item> + <item>Iface</item> </implements> <methods> </methods> </interface> diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 19b535858f..7036bdcaf5 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -224,11 +224,12 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), # Disable 097-duplicate-method while investigation (broken by latest Jack release, b/27358065) # Disable 137-cfi (b/27391690). # Disable 536-checker-needs-access-check and 537-checker-inline-and-unverified (b/27425061) +# Disable 577-profile-foreign-dex (b/27454772). TEST_ART_BROKEN_ALL_TARGET_TESTS := \ 097-duplicate-method \ - 137-cfi \ 536-checker-needs-access-check \ 537-checker-inline-and-unverified \ + 577-profile-foreign-dex \ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ @@ -540,7 +541,9 @@ endif TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS := # Tests that should fail in the read barrier configuration with the interpreter. -TEST_ART_BROKEN_INTERPRETER_READ_BARRIER_RUN_TESTS := +# 145: Test sometimes times out in read barrier configuration (b/27467554). +TEST_ART_BROKEN_INTERPRETER_READ_BARRIER_RUN_TESTS := \ + 145-alloc-tracking-stress # Tests that should fail in the read barrier configuration with the default (Quick) compiler (AOT). 
# Quick has no support for read barriers and punts to the interpreter, so this list is composed of @@ -550,6 +553,7 @@ TEST_ART_BROKEN_DEFAULT_READ_BARRIER_RUN_TESTS := \ $(TEST_ART_BROKEN_INTERPRETER_RUN_TESTS) # Tests that should fail in the read barrier configuration with the Optimizing compiler (AOT). +# 145: Test sometimes times out in read barrier configuration (b/27467554). # 484: Baker's fast path based read barrier compiler instrumentation generates code containing # more parallel moves on x86, thus some Checker assertions may fail. # 527: On ARM64, the read barrier instrumentation does not support the HArm64IntermediateAddress @@ -557,12 +561,15 @@ TEST_ART_BROKEN_DEFAULT_READ_BARRIER_RUN_TESTS := \ # 537: Expects an array copy to be intrinsified on x86-64, but calling-on-slowpath intrinsics are # not yet handled in the read barrier configuration. TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := \ + 145-alloc-tracking-stress \ 484-checker-register-hints \ 527-checker-array-access-split \ 537-checker-arraycopy # Tests that should fail in the read barrier configuration with JIT (Optimizing compiler). -TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS := +# 145: Test sometimes times out in read barrier configuration (b/27467554). +TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS := \ + 145-alloc-tracking-stress ifeq ($(ART_USE_READ_BARRIER),true) ifneq (,$(filter interpreter,$(COMPILER_TYPES))) diff --git a/test/etc/default-build b/test/etc/default-build index 6e855ec30a..5f78496c3f 100755 --- a/test/etc/default-build +++ b/test/etc/default-build @@ -116,28 +116,33 @@ if ! [ "${HAS_SRC}" = "true" ] && ! [ "${HAS_SRC2}" = "true" ]; then SKIP_DX_MERGER="true" fi -if [ "${HAS_SRC_MULTIDEX}" = "true" ]; then - # Jack does not support this configuration unless we specify how to partition the DEX file - # with a .jpp file. 
- USE_JACK="false" -fi - if [ ${USE_JACK} = "true" ]; then # Jack toolchain if [ "${HAS_SRC}" = "true" ]; then - ${JACK} ${JACK_ARGS} --output-jack src.jack src - imported_jack_files="--import src.jack" + if [ "${HAS_SRC_MULTIDEX}" = "true" ]; then + # Compile src and src-multidex in the same .jack file. We will apply multidex partitioning + # when creating the output .dex file. + ${JACK} ${JACK_ARGS} --output-jack src.jack src src src-multidex + jack_extra_args="${jack_extra_args} -D jack.dex.output.policy=minimal-multidex" + jack_extra_args="${jack_extra_args} -D jack.preprocessor=true" + jack_extra_args="${jack_extra_args} -D jack.preprocessor.file=multidex.jpp" + else + ${JACK} ${JACK_ARGS} --output-jack src.jack src + fi + jack_extra_args="${jack_extra_args} --import src.jack" fi if [ "${HAS_SRC2}" = "true" ]; then ${JACK} ${JACK_ARGS} --output-jack src2.jack src2 - imported_jack_files="--import src2.jack ${imported_jack_files}" + # In case of duplicate classes, we want to take into account the classes from src2. Therefore + # we apply the 'keep-first' policy and import src2.jack file *before* the src.jack file. + jack_extra_args="${jack_extra_args} -D jack.import.type.policy=keep-first" + jack_extra_args="--import src2.jack ${jack_extra_args}" fi - # Compile jack files into a DEX file. We set jack.import.type.policy=keep-first to consider - # class definitions from src2 first. + # Compile jack files into a DEX file. if [ "${HAS_SRC}" = "true" ] || [ "${HAS_SRC2}" = "true" ]; then - ${JACK} ${JACK_ARGS} ${imported_jack_files} -D jack.import.type.policy=keep-first --output-dex . + ${JACK} ${JACK_ARGS} ${jack_extra_args} --output-dex . fi else # Legacy toolchain with javac+dx diff --git a/test/utils/python/generate_smali_main.py b/test/utils/python/generate_java_main.py index d796d313c6..f66d0dd372 100755 --- a/test/utils/python/generate_smali_main.py +++ b/test/utils/python/generate_java_main.py @@ -15,7 +15,7 @@ # limitations under the License. 
""" -Generate Smali Main file from a classes.xml file. +Generate Java Main file from a classes.xml file. """ import os @@ -38,48 +38,27 @@ import itertools import functools import xml.etree.ElementTree as ET -class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin): +class MainClass(mixins.DumpMixin, mixins.Named, mixins.JavaFileMixin): """ A mainclass and main method for this test. """ MAIN_CLASS_TEMPLATE = """{copyright} -.class public LMain; -.super Ljava/lang/Object; - -# class Main {{ - -.method public constructor <init>()V - .registers 1 - invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V - return-void -.end method - +class Main {{ {test_groups} - {test_funcs} - {main_func} - -# }} +}} """ MAIN_FUNCTION_TEMPLATE = """ -# public static void main(String[] args) {{ -.method public static main([Ljava/lang/String;)V - .locals 2 - sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream; - + public static void main(String[] args) {{ {test_group_invoke} - - return-void -.end method -# }} + }} """ TEST_GROUP_INVOKE_TEMPLATE = """ -# {test_name}(); - invoke-static {{}}, {test_name}()V + {test_name}(); """ def __init__(self): @@ -123,7 +102,7 @@ class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin): funcs = "" for f in self.global_funcs: funcs += str(f) - return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('smali'), + return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('java'), test_groups=test_groups, main_func=main_func, test_funcs=funcs) @@ -135,33 +114,19 @@ class InstanceTest(mixins.Named, mixins.NameComparableMixin): """ INSTANCE_TEST_TEMPLATE = """ -# public static void {test_name}() {{ -# System.out.println("Testing for type {ty}"); -# String s = "{ty}"; -# {ty} v = new {ty}(); -.method public static {test_name}()V - .locals 3 - sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v0, "Testing for type {ty}" - invoke-virtual {{v2,v0}}, 
Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - - const-string v0, "{ty}" - new-instance v1, L{ty}; - invoke-direct {{v1}}, L{ty};-><init>()V + public static void {test_name}() {{ + System.out.println("Testing for type {ty}"); + String s = "{ty}"; + {ty} v = new {ty}(); {invokes} - const-string v0, "End testing for type {ty}" - invoke-virtual {{v2,v0}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V - return-void -.end method -# System.out.println("End testing for type {ty}"); -# }} + System.out.println("End testing for type {ty}"); + }} """ TEST_INVOKE_TEMPLATE = """ -# {fname}(s, v); - invoke-static {{v0, v1}}, {fname}(Ljava/lang/String;L{farg};)V + {fname}(s, v); """ def __init__(self, main, ty): @@ -188,7 +153,7 @@ class InstanceTest(mixins.Named, mixins.NameComparableMixin): def __str__(self): """ - Returns the smali code for this function + Returns the java code for this function """ func_invokes = "" for f in sorted(self.funcs, key=lambda a: (a.func, a.farg)): @@ -204,47 +169,15 @@ class Func(mixins.Named, mixins.NameComparableMixin): """ TEST_FUNCTION_TEMPLATE = """ -# public static void {fname}(String s, {farg} v) {{ -# try {{ -# System.out.printf("%s-{invoke_type:<9} {farg:>9}.{callfunc}()='%s'\\n", s, v.{callfunc}()); -# return; -# }} catch (Error e) {{ -# System.out.printf("%s-{invoke_type} on {farg}: {callfunc}() threw exception!\\n", s); -# e.printStackTrace(System.out); -# }} -# }} -.method public static {fname}(Ljava/lang/String;L{farg};)V - .locals 7 - :call_{fname}_try_start - const/4 v0, 2 - new-array v1,v0, [Ljava/lang/Object; - const/4 v0, 0 - aput-object p0,v1,v0 - - sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v3, "%s-{invoke_type:<9} {farg:>9}.{callfunc}()='%s'\\n" - - invoke-{invoke_type} {{p1}}, L{farg};->{callfunc}()Ljava/lang/String; - move-result-object v4 - const/4 v0, 1 - aput-object v4, v1, v0 - - invoke-virtual {{v2,v3,v1}}, 
Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream; - return-void - :call_{fname}_try_end - .catch Ljava/lang/Error; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start - :error_{fname}_start - move-exception v3 - const/4 v0, 1 - new-array v1,v0, [Ljava/lang/Object; - const/4 v0, 0 - aput-object p0, v1, v0 - sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream; - const-string v4, "%s-{invoke_type} on {farg}: {callfunc}() threw exception!\\n" - invoke-virtual {{v2,v4,v1}}, Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream; - invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V - return-void -.end method + public static void {fname}(String s, {farg} v) {{ + try {{ + System.out.printf("%s-{invoke_type:<9} {farg:>9}.{callfunc}()='%s'\\n", s, v.{callfunc}()); + return; + }} catch (Error e) {{ + System.out.printf("%s-{invoke_type} on {farg}: {callfunc}() threw exception!\\n", s); + e.printStackTrace(System.out); + }} + }} """ def __init__(self, func, farg, invoke): @@ -263,7 +196,7 @@ class Func(mixins.Named, mixins.NameComparableMixin): def __str__(self): """ - Get the smali code for this test function + Get the java code for this test function """ return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(), farg=self.farg, @@ -307,7 +240,7 @@ def flatten_interface_methods(dat, i): def make_main_class(dat): """ - Creates a Main.smali file that runs all the tests + Creates a Main.java file that runs all the tests """ m = MainClass() for c in dat.classes.values(): @@ -365,12 +298,12 @@ def parse_xml(xml): return TestData(classes, ifaces) def main(argv): - smali_dir = Path(argv[1]) - if not smali_dir.exists() or not smali_dir.is_dir(): - print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr) + java_dir = Path(argv[1]) + if not java_dir.exists() or not java_dir.is_dir(): + print("{} is not a valid java 
dir".format(java_dir), file=sys.stderr) sys.exit(1) - class_data = parse_xml((smali_dir / "classes.xml").open().read()) - make_main_class(class_data).dump(smali_dir) + class_data = parse_xml((java_dir / "classes.xml").open().read()) + make_main_class(class_data).dump(java_dir) if __name__ == '__main__': main(sys.argv) diff --git a/test/valgrind-suppressions.txt b/test/valgrind-suppressions.txt new file mode 100644 index 0000000000..acab6e5135 --- /dev/null +++ b/test/valgrind-suppressions.txt @@ -0,0 +1,15 @@ +{ + b/27596582 + Memcheck:Cond + fun:index + fun:expand_dynamic_string_token + fun:_dl_map_object + fun:map_doit + fun:_dl_catch_error + fun:do_preload + fun:dl_main + fun:_dl_sysdep_start + fun:_dl_start_final + fun:_dl_start + obj:/lib/x86_64-linux-gnu/ld-2.19.so +} diff --git a/tools/Android.mk b/tools/Android.mk index 9a96f7a6e7..bc2fd8c53c 100644 --- a/tools/Android.mk +++ b/tools/Android.mk @@ -19,21 +19,14 @@ LOCAL_PATH := $(call my-dir) # Copy the art shell script to the host's bin directory include $(CLEAR_VARS) LOCAL_IS_HOST_MODULE := true -LOCAL_MODULE_TAGS := optional LOCAL_MODULE_CLASS := EXECUTABLES LOCAL_MODULE := art -include $(BUILD_SYSTEM)/base_rules.mk -$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/art $(ACP) - @echo "Copy: $(PRIVATE_MODULE) ($@)" - $(copy-file-to-new-target) - $(hide) chmod 755 $@ +LOCAL_SRC_FILES := art +include $(BUILD_PREBUILT) # Copy the art shell script to the target's bin directory include $(CLEAR_VARS) LOCAL_MODULE_CLASS := EXECUTABLES LOCAL_MODULE := art -include $(BUILD_SYSTEM)/base_rules.mk -$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/art $(ACP) - @echo "Copy: $(PRIVATE_MODULE) ($@)" - $(copy-file-to-new-target) - $(hide) chmod 755 $@ +LOCAL_SRC_FILES := art +include $(BUILD_PREBUILT) diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk index 6869b04a0b..cfbafde52b 100644 --- a/tools/ahat/Android.mk +++ b/tools/ahat/Android.mk @@ -35,16 +35,10 @@ include $(BUILD_HOST_JAVA_LIBRARY) # --- ahat script ---------------- include 
$(CLEAR_VARS) LOCAL_IS_HOST_MODULE := true -LOCAL_MODULE_TAGS := optional LOCAL_MODULE_CLASS := EXECUTABLES LOCAL_MODULE := ahat -include $(BUILD_SYSTEM)/base_rules.mk -$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/ahat $(ACP) - @echo "Copy: $(PRIVATE_MODULE) ($@)" - $(copy-file-to-new-target) - $(hide) chmod 755 $@ - -ahat: $(LOCAL_BUILT_MODULE) +LOCAL_SRC_FILES := ahat +include $(BUILD_PREBUILT) # --- ahat-tests.jar -------------- include $(CLEAR_VARS) diff --git a/tools/checker/common/logger.py b/tools/checker/common/logger.py index 28bb458da7..f13eaf6142 100644 --- a/tools/checker/common/logger.py +++ b/tools/checker/common/logger.py @@ -13,6 +13,7 @@ # limitations under the License. from __future__ import print_function +import collections import sys class Logger(object): @@ -21,7 +22,7 @@ class Logger(object): NoOutput, Error, Info = range(3) class Color(object): - Default, Blue, Gray, Purple, Red = range(5) + Default, Blue, Gray, Purple, Red, Green = range(6) @staticmethod def terminalCode(color, out=sys.stdout): @@ -35,6 +36,8 @@ class Logger(object): return '\033[95m' elif color == Logger.Color.Red: return '\033[91m' + elif color == Logger.Color.Green: + return '\033[32m' else: return '\033[0m' @@ -52,19 +55,34 @@ class Logger(object): out.flush() @staticmethod - def fail(msg, file=None, line=-1): - location = "" - if file: - location += file + ":" - if line > 0: - location += str(line) + ":" - if location: - location += " " - - Logger.log(location, Logger.Level.Error, color=Logger.Color.Gray, newLine=False, out=sys.stderr) + def fail(msg, file=None, line=-1, lineText=None, variables=None): Logger.log("error: ", Logger.Level.Error, color=Logger.Color.Red, newLine=False, out=sys.stderr) Logger.log(msg, Logger.Level.Error, out=sys.stderr) - sys.exit(msg) + + if lineText: + loc = "" + if file: + loc += file + ":" + if line > 0: + loc += str(line) + ":" + if loc: + loc += " " + Logger.log(loc, Logger.Level.Error, color=Logger.Color.Gray, newLine=False, out=sys.stderr) 
+ Logger.log(lineText, Logger.Level.Error, out=sys.stderr) + + if variables: + longestName = 0 + for var in variables: + longestName = max(longestName, len(var)) + + for var in collections.OrderedDict(sorted(variables.items())): + padding = ' ' * (longestName - len(var)) + Logger.log(var, Logger.Level.Error, color=Logger.Color.Green, newLine=False, out=sys.stderr) + Logger.log(padding, Logger.Level.Error, newLine=False, out=sys.stderr) + Logger.log(" = ", Logger.Level.Error, newLine=False, out=sys.stderr) + Logger.log(variables[var], Logger.Level.Error, out=sys.stderr) + + sys.exit(1) @staticmethod def startTest(name): @@ -76,6 +94,6 @@ class Logger(object): Logger.log("PASS", color=Logger.Color.Blue) @staticmethod - def testFailed(msg, file=None, line=-1): + def testFailed(msg, assertion, variables): Logger.log("FAIL", color=Logger.Color.Red) - Logger.fail(msg, file, line) + Logger.fail(msg, assertion.fileName, assertion.lineNo, assertion.originalText, variables) diff --git a/tools/checker/match/file.py b/tools/checker/match/file.py index 3ded07482f..6ff19d5197 100644 --- a/tools/checker/match/file.py +++ b/tools/checker/match/file.py @@ -23,9 +23,10 @@ MatchScope = namedtuple("MatchScope", ["start", "end"]) MatchInfo = namedtuple("MatchInfo", ["scope", "variables"]) class MatchFailedException(Exception): - def __init__(self, assertion, lineNo): + def __init__(self, assertion, lineNo, variables): self.assertion = assertion self.lineNo = lineNo + self.variables = variables def splitIntoGroups(assertions): """ Breaks up a list of assertions, grouping instructions which should be @@ -58,7 +59,7 @@ def findMatchingLine(assertion, c1Pass, scope, variables, excludeLines=[]): newVariables = MatchLines(assertion, c1Pass.body[i], variables) if newVariables is not None: return MatchInfo(MatchScope(i, i), newVariables) - raise MatchFailedException(assertion, scope.start) + raise MatchFailedException(assertion, scope.start, variables) def matchDagGroup(assertions, c1Pass, 
scope, variables): """ Attempts to find matching `c1Pass` lines for a group of DAG assertions. @@ -92,12 +93,12 @@ def testNotGroup(assertions, c1Pass, scope, variables): for assertion in assertions: assert assertion.variant == TestAssertion.Variant.Not if MatchLines(assertion, line, variables) is not None: - raise MatchFailedException(assertion, i) + raise MatchFailedException(assertion, i, variables) def testEvalGroup(assertions, scope, variables): for assertion in assertions: if not EvaluateLine(assertion, variables): - raise MatchFailedException(assertion, scope.start) + raise MatchFailedException(assertion, scope.start, variables) def MatchTestCase(testCase, c1Pass): """ Runs a test case against a C1visualizer graph dump. @@ -181,8 +182,8 @@ def MatchFiles(checkerFile, c1File, targetArch, debuggableMode): except MatchFailedException as e: lineNo = c1Pass.startLineNo + e.lineNo if e.assertion.variant == TestAssertion.Variant.Not: - Logger.testFailed("NOT assertion matched line {}".format(lineNo), - e.assertion.fileName, e.assertion.lineNo) + msg = "NOT assertion matched line {}" else: - Logger.testFailed("Assertion could not be matched starting from line {}".format(lineNo), - e.assertion.fileName, e.assertion.lineNo) + msg = "Assertion could not be matched starting from line {}" + msg = msg.format(lineNo) + Logger.testFailed(msg, e.assertion, e.variables) diff --git a/tools/checker/match/line.py b/tools/checker/match/line.py index 08f001f660..ed48a5329f 100644 --- a/tools/checker/match/line.py +++ b/tools/checker/match/line.py @@ -35,15 +35,13 @@ def getVariable(name, variables, pos): if name in variables: return variables[name] else: - Logger.testFailed("Missing definition of variable \"{}\"".format(name), - pos.fileName, pos.lineNo) + Logger.testFailed("Missing definition of variable \"{}\"".format(name), pos, variables) def setVariable(name, value, variables, pos): if name not in variables: return variables.copyWith(name, value) else: - 
Logger.testFailed("Multiple definitions of variable \"{}\"".format(name), - pos.fileName, pos.lineNo) + Logger.testFailed("Multiple definitions of variable \"{}\"".format(name), pos, variables) def matchWords(checkerWord, stringWord, variables, pos): """ Attempts to match a list of TestExpressions against a string. diff --git a/tools/dexfuzz/Android.mk b/tools/dexfuzz/Android.mk index 1580bc37fb..473f6de3e5 100644 --- a/tools/dexfuzz/Android.mk +++ b/tools/dexfuzz/Android.mk @@ -27,14 +27,10 @@ include $(BUILD_HOST_JAVA_LIBRARY) # --- dexfuzz script ---------------- include $(CLEAR_VARS) LOCAL_IS_HOST_MODULE := true -LOCAL_MODULE_TAGS := optional LOCAL_MODULE_CLASS := EXECUTABLES LOCAL_MODULE := dexfuzz -include $(BUILD_SYSTEM)/base_rules.mk -$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/dexfuzz $(ACP) - @echo "Copy: $(PRIVATE_MODULE) ($@)" - $(copy-file-to-new-target) - $(hide) chmod 755 $@ +LOCAL_SRC_FILES := dexfuzz +include $(BUILD_PREBUILT) # --- dexfuzz script with core image dependencies ---------------- fuzzer: $(LOCAL_BUILT_MODULE) $(HOST_CORE_IMG_OUTS) diff --git a/tools/libcore_failures_concurrent_collector.txt b/tools/libcore_failures_concurrent_collector.txt index 19a61dc8cb..75d1eff2c3 100644 --- a/tools/libcore_failures_concurrent_collector.txt +++ b/tools/libcore_failures_concurrent_collector.txt @@ -22,14 +22,5 @@ result: EXEC_FAILED, names: ["libcore.java.lang.OldSystemTest#test_gc"], bug: 26155567 -}, -{ - description: "TimeoutException on hammerhead-concurrent-collector", - result: EXEC_FAILED, - modes: [device], - names: ["libcore.icu.RelativeDateTimeFormatterTest#test_bug25821045", - "libcore.java.text.SimpleDateFormatTest#testLocales", - "libcore.java.util.zip.ZipFileTest#testZipFileWithLotsOfEntries"], - bug: 26711853 } ] diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh index e4af9fa0d7..8422e20823 100755 --- a/tools/run-jdwp-tests.sh +++ b/tools/run-jdwp-tests.sh @@ -51,7 +51,7 @@ vm_command="--vm-command=$art" image_compiler_option="" 
debug="no" verbose="no" -image="-Ximage:/data/art-test/core-jit.art" +image="-Ximage:/data/art-test/core-optimizing-pic.art" vm_args="" # By default, we run the whole JDWP test suite. test="org.apache.harmony.jpda.tests.share.AllTests" @@ -70,9 +70,6 @@ while true; do device_dir="" # Vogar knows which VM to use on host. vm_command="" - # We only compile the image on the host. Note that not providing this option - # for target testing puts us below the adb command limit for vogar. - image_compiler_option="--vm-arg -Ximage-compiler-option --vm-arg --debuggable" shift elif [[ $1 == -Ximage:* ]]; then image="$1" diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh index 45b60dc647..1e9c763534 100755 --- a/tools/setup-buildbot-device.sh +++ b/tools/setup-buildbot-device.sh @@ -37,6 +37,22 @@ adb shell uptime echo -e "${green}Battery info${nc}" adb shell dumpsys battery +echo -e "${green}Setting adb buffer size to 32MB${nc}" +adb logcat -G 32M +adb logcat -g + +echo -e "${green}Removing adb spam filter${nc}" +adb logcat -P "" +adb logcat -p + echo -e "${green}Kill stalled dalvikvm processes${nc}" -processes=$(adb shell "ps" | grep dalvikvm | awk '{print $2}') -for i in $processes; do adb shell kill -9 $i; done +# 'ps' on M can sometimes hang. +timeout 2s adb shell "ps" +if [ $? = 124 ]; then + echo -e "${green}Rebooting device to fix 'ps'${nc}" + adb reboot + adb wait-for-device root +else + processes=$(adb shell "ps" | grep dalvikvm | awk '{print $2}') + for i in $processes; do adb shell kill -9 $i; done +fi |