diff options
127 files changed, 3541 insertions, 1090 deletions
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk index d2e3371551..291db8b531 100644 --- a/build/Android.common_test.mk +++ b/build/Android.common_test.mk @@ -124,12 +124,17 @@ ART_TEST_RUN_TEST_DEBUGGABLE ?= $(ART_TEST_FULL) ART_TEST_RUN_TEST_MULTI_IMAGE ?= $(ART_TEST_FULL) # Define the command run on test failure. $(1) is the name of the test. Executed by the shell. +# If the test was a top-level make target (e.g. `test-art-host-gtest-codegen_test64`), the command +# fails with exit status 1 (returned by the last `grep` statement below). +# Otherwise (e.g., if the test was run as a prerequisite of a compound test command, such as +# `test-art-host-gtest-codegen_test`), the command does not fail, as this would break rules running +# ART_TEST_PREREQ_FINISHED as one of their actions, which expects *all* prerequisites *not* to fail. define ART_TEST_FAILED ( [ -f $(ART_HOST_TEST_DIR)/skipped/$(1) ] || \ (mkdir -p $(ART_HOST_TEST_DIR)/failed/ && touch $(ART_HOST_TEST_DIR)/failed/$(1) && \ echo $(ART_TEST_KNOWN_FAILING) | grep -q $(1) \ && (echo -e "$(1) \e[91mKNOWN FAILURE\e[0m") \ - || (echo -e "$(1) \e[91mFAILED\e[0m" >&2 ))) + || (echo -e "$(1) \e[91mFAILED\e[0m" >&2; echo $(MAKECMDGOALS) | grep -q -v $(1)))) endef ifeq ($(ART_TEST_QUIET),true) diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h index dec9c83165..6e042c3c27 100644 --- a/cmdline/cmdline.h +++ b/cmdline/cmdline.h @@ -234,7 +234,7 @@ struct CmdlineArgs { // Checks for --boot-image location. { std::string boot_image_location = boot_image_location_; - size_t file_name_idx = boot_image_location.rfind("/"); + size_t file_name_idx = boot_image_location.rfind('/'); if (file_name_idx == std::string::npos) { // Prevent a InsertIsaDirectory check failure. *error_msg = "Boot image location must have a / in it"; return false; @@ -244,7 +244,7 @@ struct CmdlineArgs { // This prevents a common error "Could not create an image space..." when initing the Runtime. 
if (file_name_idx != std::string::npos) { std::string no_file_name = boot_image_location.substr(0, file_name_idx); - size_t ancestor_dirs_idx = no_file_name.rfind("/"); + size_t ancestor_dirs_idx = no_file_name.rfind('/'); std::string parent_dir_name; if (ancestor_dirs_idx != std::string::npos) { diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h index cfc096728f..d82fd488e9 100644 --- a/cmdline/cmdline_parser.h +++ b/cmdline/cmdline_parser.h @@ -390,7 +390,7 @@ struct CmdlineParser { // Unlike regular argument definitions, when a value gets parsed into its // stronger type, we just throw it away. - if (ign.find("_") != std::string::npos) { // Does the arg-def have a wildcard? + if (ign.find('_') != std::string::npos) { // Does the arg-def have a wildcard? // pretend this is a string, e.g. -Xjitconfig:<anythinggoeshere> auto&& builder = Define(ignore_name).template WithType<std::string>().IntoIgnore(); assert(&builder == this); diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc index cad51045aa..550e8c4605 100644 --- a/cmdline/cmdline_parser_test.cc +++ b/cmdline/cmdline_parser_test.cc @@ -78,7 +78,7 @@ namespace art { return memcmp(std::addressof(expected), std::addressof(actual), sizeof(expected)) == 0; } - bool UsuallyEquals(const char* expected, std::string actual) { + bool UsuallyEquals(const char* expected, const std::string& actual) { return std::string(expected) == actual; } @@ -129,7 +129,7 @@ class CmdlineParserTest : public ::testing::Test { parser_ = ParsedOptions::MakeParser(false); // do not ignore unrecognized options } - static ::testing::AssertionResult IsResultSuccessful(CmdlineResult result) { + static ::testing::AssertionResult IsResultSuccessful(const CmdlineResult& result) { if (result.IsSuccess()) { return ::testing::AssertionSuccess(); } else { @@ -138,7 +138,7 @@ class CmdlineParserTest : public ::testing::Test { } } - static ::testing::AssertionResult IsResultFailure(CmdlineResult result, + static 
::testing::AssertionResult IsResultFailure(const CmdlineResult& result, CmdlineResult::Status failure_status) { if (result.IsSuccess()) { return ::testing::AssertionFailure() << " got success but expected failure: " diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h index a5bb117509..3f55eefa0e 100644 --- a/cmdline/cmdline_types.h +++ b/cmdline/cmdline_types.h @@ -696,7 +696,7 @@ struct CmdlineType<ProfileSaverOptions> : CmdlineTypeParser<ProfileSaverOptions> } static std::string RemovePrefix(const std::string& source) { - size_t prefix_idx = source.find(":"); + size_t prefix_idx = source.find(':'); if (prefix_idx == std::string::npos) { return ""; diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h index 84beff59c7..14eac30aa1 100644 --- a/cmdline/detail/cmdline_parse_argument_detail.h +++ b/cmdline/detail/cmdline_parse_argument_detail.h @@ -108,7 +108,7 @@ namespace art { // If this is true, then the wildcard matching later on can still fail, so this is not // a guarantee that the argument is correct, it's more of a strong hint that the // user-provided input *probably* was trying to match this argument. - size_t MaybeMatches(TokenRange token_list) const { + size_t MaybeMatches(const TokenRange& token_list) const { auto best_match = FindClosestMatch(token_list); return best_match.second; @@ -118,7 +118,7 @@ namespace art { // // Returns the token range that was the closest match and the # of tokens that // this range was matched up until. 
- std::pair<const TokenRange*, size_t> FindClosestMatch(TokenRange token_list) const { + std::pair<const TokenRange*, size_t> FindClosestMatch(const TokenRange& token_list) const { const TokenRange* best_match_ptr = nullptr; size_t best_match = 0; diff --git a/compiler/Android.bp b/compiler/Android.bp index f1bf27ece7..7ddd582385 100644 --- a/compiler/Android.bp +++ b/compiler/Android.bp @@ -42,6 +42,7 @@ art_cc_defaults { "linker/vector_output_stream.cc", "linker/relative_patcher.cc", "jit/jit_compiler.cc", + "jit/jit_logger.cc", "jni/quick/calling_convention.cc", "jni/quick/jni_compiler.cc", "optimizing/block_builder.cc", diff --git a/compiler/intrinsics_list.h b/compiler/intrinsics_list.h index b617387ef8..555baf6de9 100644 --- a/compiler/intrinsics_list.h +++ b/compiler/intrinsics_list.h @@ -108,8 +108,10 @@ V(StringCompareTo, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I") \ V(StringEquals, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z") \ V(StringGetCharsNoCheck, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "getCharsNoCheck", "(II[CI)V") \ - V(StringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(I)I") \ - V(StringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(II)I") \ + V(StringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "indexOf", "(I)I") \ + V(StringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "indexOf", "(II)I") \ + V(StringStringIndexOf, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(Ljava/lang/String;)I") \ + V(StringStringIndexOfAfter, kVirtual, kNeedsEnvironmentOrCache, 
kReadSideEffects, kCanThrow, "Ljava/lang/String;", "indexOf", "(Ljava/lang/String;I)I") \ V(StringIsEmpty, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "isEmpty", "()Z") \ V(StringLength, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/String;", "length", "()I") \ V(StringNewStringFromBytes, kStatic, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringFactory;", "newStringFromBytes", "([BIII)Ljava/lang/String;") \ diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index f83d37cdf2..9dfb434b10 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -171,19 +171,10 @@ JitCompiler::JitCompiler() { size_t thread_count = compiler_driver_->GetThreadCount(); if (compiler_options_->GetGenerateDebugInfo()) { -#ifdef ART_TARGET_ANDROID - const char* prefix = "/data/misc/trace"; -#else - const char* prefix = "/tmp"; -#endif DCHECK_EQ(thread_count, 1u) << "Generating debug info only works with one compiler thread"; - std::string perf_filename = std::string(prefix) + "/perf-" + std::to_string(getpid()) + ".map"; - perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str())); - if (perf_file_ == nullptr) { - LOG(ERROR) << "Could not create perf file at " << perf_filename << - " Are you on a user build? 
Perf only works on userdebug/eng builds"; - } + jit_logger_.reset(new JitLogger()); + jit_logger_->OpenLog(); } size_t inline_depth_limit = compiler_driver_->GetCompilerOptions().GetInlineDepthLimit(); @@ -192,9 +183,8 @@ JitCompiler::JitCompiler() { } JitCompiler::~JitCompiler() { - if (perf_file_ != nullptr) { - UNUSED(perf_file_->Flush()); - UNUSED(perf_file_->Close()); + if (compiler_options_->GetGenerateDebugInfo()) { + jit_logger_->CloseLog(); } } @@ -218,19 +208,8 @@ bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) { TimingLogger::ScopedTiming t2("Compiling", &logger); JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache(); success = compiler_driver_->GetCompiler()->JitCompile(self, code_cache, method, osr); - if (success && (perf_file_ != nullptr)) { - const void* ptr = method->GetEntryPointFromQuickCompiledCode(); - std::ostringstream stream; - stream << std::hex - << reinterpret_cast<uintptr_t>(ptr) - << " " - << code_cache->GetMemorySizeOfCodePointer(ptr) - << " " - << method->PrettyMethod() - << std::endl; - std::string str = stream.str(); - bool res = perf_file_->WriteFully(str.c_str(), str.size()); - CHECK(res); + if (success && (jit_logger_ != nullptr)) { + jit_logger_->WriteLog(code_cache, method); } } diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h index ea2747c085..f0f24d345e 100644 --- a/compiler/jit/jit_compiler.h +++ b/compiler/jit/jit_compiler.h @@ -19,6 +19,7 @@ #include "base/mutex.h" #include "compiled_method.h" +#include "jit_logger.h" #include "driver/compiler_driver.h" #include "driver/compiler_options.h" @@ -50,7 +51,7 @@ class JitCompiler { std::unique_ptr<CumulativeLogger> cumulative_logger_; std::unique_ptr<CompilerDriver> compiler_driver_; std::unique_ptr<const InstructionSetFeatures> instruction_set_features_; - std::unique_ptr<File> perf_file_; + std::unique_ptr<JitLogger> jit_logger_; JitCompiler(); diff --git a/compiler/jit/jit_logger.cc b/compiler/jit/jit_logger.cc 
new file mode 100644 index 0000000000..9ce3b0cfe8 --- /dev/null +++ b/compiler/jit/jit_logger.cc @@ -0,0 +1,312 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "jit_logger.h" + +#include "arch/instruction_set.h" +#include "art_method-inl.h" +#include "base/time_utils.h" +#include "base/unix_file/fd_file.h" +#include "driver/compiler_driver.h" +#include "jit/jit.h" +#include "jit/jit_code_cache.h" + +namespace art { +namespace jit { + +#ifdef ART_TARGET_ANDROID +static const char* kLogPrefix = "/data/misc/trace"; +#else +static const char* kLogPrefix = "/tmp"; +#endif + +// File format of perf-PID.map: +// +---------------------+ +// |ADDR SIZE symbolname1| +// |ADDR SIZE symbolname2| +// |... | +// +---------------------+ +void JitLogger::OpenPerfMapLog() { + std::string pid_str = std::to_string(getpid()); + std::string perf_filename = std::string(kLogPrefix) + "/perf-" + pid_str + ".map"; + perf_file_.reset(OS::CreateEmptyFileWriteOnly(perf_filename.c_str())); + if (perf_file_ == nullptr) { + LOG(ERROR) << "Could not create perf file at " << perf_filename << + " Are you on a user build? 
Perf only works on userdebug/eng builds"; + } +} + +void JitLogger::WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method) { + if (perf_file_ != nullptr) { + const void* ptr = method->GetEntryPointFromQuickCompiledCode(); + size_t code_size = code_cache->GetMemorySizeOfCodePointer(ptr); + std::string method_name = method->PrettyMethod(); + + std::ostringstream stream; + stream << std::hex + << reinterpret_cast<uintptr_t>(ptr) + << " " + << code_size + << " " + << method_name + << std::endl; + std::string str = stream.str(); + bool res = perf_file_->WriteFully(str.c_str(), str.size()); + if (!res) { + LOG(WARNING) << "Failed to write jitted method info in log: write failure."; + } + } else { + LOG(WARNING) << "Failed to write jitted method info in log: log file doesn't exist."; + } +} + +void JitLogger::ClosePerfMapLog() { + if (perf_file_ != nullptr) { + UNUSED(perf_file_->Flush()); + UNUSED(perf_file_->Close()); + } +} + +// File format of jit-PID.jump: +// +// +--------------------------------+ +// | PerfJitHeader | +// +--------------------------------+ +// | PerfJitCodeLoad { | . +// | struct PerfJitBase; | . +// | uint32_t process_id_; | . +// | uint32_t thread_id_; | . +// | uint64_t vma_; | . +// | uint64_t code_address_; | . +// | uint64_t code_size_; | . +// | uint64_t code_id_; | . +// | } | . +// +- -+ . +// | method_name'\0' | +--> one jitted method +// +- -+ . +// | jitted code binary | . +// | ... | . +// +--------------------------------+ . +// | PerfJitCodeDebugInfo { | . +// | struct PerfJitBase; | . +// | uint64_t address_; | . +// | uint64_t entry_count_; | . +// | struct PerfJitDebugEntry; | . +// | } | . +// +--------------------------------+ +// | PerfJitCodeLoad | +// ... 
+// +struct PerfJitHeader { + uint32_t magic_; // Characters "JiTD" + uint32_t version_; // Header version + uint32_t size_; // Total size of header + uint32_t elf_mach_target_; // Elf mach target + uint32_t reserved_; // Reserved, currently not used + uint32_t process_id_; // Process ID of the JIT compiler + uint64_t time_stamp_; // Timestamp when the header is generated + uint64_t flags_; // Currently the flags are only used for choosing clock for timestamp, + // we set it to 0 to tell perf that we use CLOCK_MONOTONIC clock. + static const uint32_t kMagic = 0x4A695444; // "JiTD" + static const uint32_t kVersion = 1; +}; + +// Each record starts with such basic information: event type, total size, and timestamp. +struct PerfJitBase { + enum PerfJitEvent { + // A jitted code load event. + // In ART JIT, it is used to log a new method is jit compiled and committed to jit-code-cache. + // Note that such kLoad event supports code cache GC in ART JIT. + // For every kLoad event recorded in jit-PID.dump and every perf sample recorded in perf.data, + // each event/sample has time stamp. In case code cache GC happens in ART JIT, and a new + // jitted method is committed to the same address of a previously deleted method, + // the time stamp information can help profiler to tell whether this sample belongs to the + // era of the first jitted method, or does it belong to the period of the second jitted method. + // JitCodeCache doesn't have to record any event on 'code delete'. + kLoad = 0, + + // A jitted code move event, i,e. a jitted code moved from one address to another address. + // It helps profiler to map samples to the right symbol even when the code is moved. + // In ART JIT, this event can help log such behavior: + // A jitted method is recorded in previous kLoad event, but due to some reason, + // it is moved to another address in jit-code-cache. + kMove = 1, + + // Logs debug line/column information. + kDebugInfo = 2, + + // Logs JIT VM end of life event. 
+ kClose = 3 + }; + uint32_t event_; // Must be one of the events defined in PerfJitEvent. + uint32_t size_; // Total size of this event record. + // For example, for kLoad event, size of the event record is: + // sizeof(PerfJitCodeLoad) + method_name.size() + compiled code size. + uint64_t time_stamp_; // Timestamp for the event. +}; + +// Logs a jitted code load event (kLoad). +// In ART JIT, it is used to log a new method is jit compiled and commited to jit-code-cache. +struct PerfJitCodeLoad : PerfJitBase { + uint32_t process_id_; // Process ID who performs the jit code load. + // In ART JIT, it is the pid of the JIT compiler. + uint32_t thread_id_; // Thread ID who performs the jit code load. + // In ART JIT, it is the tid of the JIT compiler. + uint64_t vma_; // Address of the code section. In ART JIT, because code_address_ + // uses absolute address, this field is 0. + uint64_t code_address_; // Address where is jitted code is loaded. + uint64_t code_size_; // Size of the jitted code. + uint64_t code_id_; // Unique ID for each jitted code. +}; + +// This structure is for source line/column mapping. +// Currently this feature is not implemented in ART JIT yet. +struct PerfJitDebugEntry { + uint64_t address_; // Code address which maps to the line/column in source. + uint32_t line_number_; // Source line number starting at 1. + uint32_t column_; // Column discriminator, default 0. + const char name_[0]; // Followed by null-terminated name or \0xff\0 if same as previous. +}; + +// Logs debug line information (kDebugInfo). +// This structure is for source line/column mapping. +// Currently this feature is not implemented in ART JIT yet. +struct PerfJitCodeDebugInfo : PerfJitBase { + uint64_t address_; // Starting code address which the debug info describes. + uint64_t entry_count_; // How many instances of PerfJitDebugEntry. + PerfJitDebugEntry entries_[0]; // Followed by entry_count_ instances of PerfJitDebugEntry. 
+}; + +static uint32_t GetElfMach() { +#if defined(__arm__) + static const uint32_t kElfMachARM = 0x28; + return kElfMachARM; +#elif defined(__aarch64__) + static const uint32_t kElfMachARM64 = 0xB7; + return kElfMachARM64; +#elif defined(__i386__) + static const uint32_t kElfMachIA32 = 0x3; + return kElfMachIA32; +#elif defined(__x86_64__) + static const uint32_t kElfMachX64 = 0x3E; + return kElfMachX64; +#else + UNIMPLEMENTED(WARNING) << "Unsupported architecture in JitLogger"; + return 0; +#endif +} + +void JitLogger::OpenMarkerFile() { + int fd = jit_dump_file_->Fd(); + // The 'perf inject' tool requires that the jit-PID.dump file + // must have a mmap(PROT_READ|PROT_EXEC) record in perf.data. + marker_address_ = mmap(nullptr, kPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0); + if (marker_address_ == MAP_FAILED) { + LOG(WARNING) << "Failed to create record in perf.data. JITed code profiling will not work."; + return; + } +} + +void JitLogger::CloseMarkerFile() { + if (marker_address_ != nullptr) { + munmap(marker_address_, kPageSize); + } +} + +void JitLogger::WriteJitDumpDebugInfo() { + // In the future, we can add java source file line/column mapping here. +} + +void JitLogger::WriteJitDumpHeader() { + PerfJitHeader header; + + std::memset(&header, 0, sizeof(header)); + header.magic_ = PerfJitHeader::kMagic; + header.version_ = PerfJitHeader::kVersion; + header.size_ = sizeof(header); + header.elf_mach_target_ = GetElfMach(); + header.process_id_ = static_cast<uint32_t>(getpid()); + header.time_stamp_ = art::NanoTime(); // CLOCK_MONOTONIC clock is required. + header.flags_ = 0; + + bool res = jit_dump_file_->WriteFully(reinterpret_cast<const char*>(&header), sizeof(header)); + if (!res) { + LOG(WARNING) << "Failed to write profiling log. 
The 'perf inject' tool will not work."; + } +} + +void JitLogger::OpenJitDumpLog() { + std::string pid_str = std::to_string(getpid()); + std::string jitdump_filename = std::string(kLogPrefix) + "/jit-" + pid_str + ".dump"; + + jit_dump_file_.reset(OS::CreateEmptyFile(jitdump_filename.c_str())); + if (jit_dump_file_ == nullptr) { + LOG(ERROR) << "Could not create jit dump file at " << jitdump_filename << + " Are you on a user build? Perf only works on userdebug/eng builds"; + return; + } + + OpenMarkerFile(); + + // Continue to write jit-PID.dump file even above OpenMarkerFile() fails. + // Even if that means 'perf inject' tool cannot work, developers can still use other tools + // to map the samples in perf.data to the information (symbol,address,code) recorded + // in the jit-PID.dump file, and still proceed the jitted code analysis. + WriteJitDumpHeader(); +} + +void JitLogger::WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method) { + if (jit_dump_file_ != nullptr) { + const void* code = method->GetEntryPointFromQuickCompiledCode(); + size_t code_size = code_cache->GetMemorySizeOfCodePointer(code); + std::string method_name = method->PrettyMethod(); + + PerfJitCodeLoad jit_code; + std::memset(&jit_code, 0, sizeof(jit_code)); + jit_code.event_ = PerfJitCodeLoad::kLoad; + jit_code.size_ = sizeof(jit_code) + method_name.size() + 1 + code_size; + jit_code.time_stamp_ = art::NanoTime(); // CLOCK_MONOTONIC clock is required. + jit_code.process_id_ = static_cast<uint32_t>(getpid()); + jit_code.thread_id_ = static_cast<uint32_t>(art::GetTid()); + jit_code.vma_ = 0x0; + jit_code.code_address_ = reinterpret_cast<uint64_t>(code); + jit_code.code_size_ = code_size; + jit_code.code_id_ = code_index_++; + + // Write one complete jitted method info, including: + // - PerfJitCodeLoad structure + // - Method name + // - Complete generated code of this method + // + // Use UNUSED() here to avoid compiler warnings. 
+ UNUSED(jit_dump_file_->WriteFully(reinterpret_cast<const char*>(&jit_code), sizeof(jit_code))); + UNUSED(jit_dump_file_->WriteFully(method_name.c_str(), method_name.size() + 1)); + UNUSED(jit_dump_file_->WriteFully(code, code_size)); + + WriteJitDumpDebugInfo(); + } +} + +void JitLogger::CloseJitDumpLog() { + if (jit_dump_file_ != nullptr) { + CloseMarkerFile(); + UNUSED(jit_dump_file_->Flush()); + UNUSED(jit_dump_file_->Close()); + } +} + +} // namespace jit +} // namespace art diff --git a/compiler/jit/jit_logger.h b/compiler/jit/jit_logger.h new file mode 100644 index 0000000000..0f8cfe4e2f --- /dev/null +++ b/compiler/jit/jit_logger.h @@ -0,0 +1,137 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_JIT_JIT_LOGGER_H_ +#define ART_COMPILER_JIT_JIT_LOGGER_H_ + +#include "base/mutex.h" +#include "compiled_method.h" +#include "driver/compiler_driver.h" +#include "driver/compiler_options.h" + +namespace art { + +class ArtMethod; + +namespace jit { + +// +// JitLogger supports two approaches of perf profiling. +// +// (1) perf-map: +// The perf-map mechanism generates perf-PID.map file, +// which provides simple "address, size, method_name" information to perf, +// and allows perf to map samples in jit-code-cache to jitted method symbols. 
+// +// Command line Example: +// $ perf record dalvikvm -Xcompiler-option --generate-debug-info -cp <classpath> Test +// $ perf report +// NOTE: +// - Make sure that the perf-PID.map file is available for 'perf report' tool to access, +// so that jitted method can be displayed. +// +// +// (2) perf-inject: +// The perf-inject mechansim generates jit-PID.dump file, +// which provides rich informations about a jitted method. +// It allows perf or other profiling tools to do advanced analysis on jitted code, +// for example instruction level profiling. +// +// Command line Example: +// $ perf record -k mono dalvikvm -Xcompiler-option --generate-debug-info -cp <classpath> Test +// $ perf inject -i perf.data -o perf.data.jitted +// $ perf report -i perf.data.jitted +// $ perf annotate -i perf.data.jitted +// NOTE: +// REQUIREMENTS +// - The 'perf record -k mono' option requires 4.1 (or higher) Linux kernel. +// - The 'perf inject' (generating jit ELF files feature) requires perf 4.6 (or higher). +// PERF RECORD +// - The '-k mono' option tells 'perf record' to use CLOCK_MONOTONIC clock during sampling; +// which is required by 'perf inject', to make sure that both perf.data and jit-PID.dump +// have unified clock source for timestamps. +// PERF INJECT +// - The 'perf inject' tool injects information from jit-PID.dump into perf.data file, +// and generates small ELF files (jitted-TID-CODEID.so) for each jitted method. +// - On Android devices, the jit-PID.dump file is generated in /data/misc/trace/ folder, and +// such location is recorded in perf.data file. +// The 'perf inject' tool is going to look for jit-PID.dump and generates small ELF files in +// this /data/misc/trace/ folder. +// Make sure that you have the read/write access to /data/misc/trace/ folder. +// - On non-Android devices, the jit-PID.dump file is generated in /tmp/ folder, and +// 'perf inject' tool operates on this folder. +// Make sure that you have the read/write access to /tmp/ folder. 
+// - If you are executing 'perf inject' on non-Android devices (host), but perf.data and +// jit-PID.dump files are adb-pulled from Android devices, make sure that there is a +// /data/misc/trace/ folder on host, and jit-PID.dump file is copied to this folder. +// - Currently 'perf inject' doesn't provide option to change the path for jit-PID.dump and +// generated ELF files. +// PERF ANNOTATE +// - The 'perf annotate' tool displays assembly level profiling report. +// Source code can also be displayed if the ELF file has debug symbols. +// - Make sure above small ELF files are available for 'perf annotate' tool to access, +// so that jitted code can be displayed in assembly view. +// +class JitLogger { + public: + JitLogger() : code_index_(0), marker_address_(nullptr) {} + + void OpenLog() { + OpenPerfMapLog(); + OpenJitDumpLog(); + } + + void WriteLog(JitCodeCache* code_cache, ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_) { + WritePerfMapLog(code_cache, method); + WriteJitDumpLog(code_cache, method); + } + + void CloseLog() { + ClosePerfMapLog(); + CloseJitDumpLog(); + } + + private: + // For perf-map profiling + void OpenPerfMapLog(); + void WritePerfMapLog(JitCodeCache* code_cache, ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_); + void ClosePerfMapLog(); + + // For perf-inject profiling + void OpenJitDumpLog(); + void WriteJitDumpLog(JitCodeCache* code_cache, ArtMethod* method) + REQUIRES_SHARED(Locks::mutator_lock_); + void CloseJitDumpLog(); + + void OpenMarkerFile(); + void CloseMarkerFile(); + void WriteJitDumpHeader(); + void WriteJitDumpDebugInfo(); + + std::unique_ptr<File> perf_file_; + std::unique_ptr<File> jit_dump_file_; + uint64_t code_index_; + void* marker_address_; + + DISALLOW_COPY_AND_ASSIGN(JitLogger); +}; + +} // namespace jit +} // namespace art + +#endif // ART_COMPILER_JIT_JIT_LOGGER_H_ diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index 4960a7343e..a9044a2047 100644 --- 
a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -2202,8 +2202,7 @@ void JniCompilerTest::NormalNativeImpl() { "()V", CURRENT_JNI_WRAPPER(Java_MyClassNatives_normalNative)); - ScopedObjectAccess soa(Thread::Current()); - ArtMethod* method = soa.DecodeMethod(jmethod_); + ArtMethod* method = jni::DecodeArtMethod(jmethod_); ASSERT_TRUE(method != nullptr); EXPECT_FALSE(method->IsAnnotatedWithCriticalNative()); @@ -2225,8 +2224,7 @@ void JniCompilerTest::FastNativeImpl() { "()V", CURRENT_JNI_WRAPPER(Java_MyClassNatives_fastNative)); - ScopedObjectAccess soa(Thread::Current()); - ArtMethod* method = soa.DecodeMethod(jmethod_); + ArtMethod* method = jni::DecodeArtMethod(jmethod_); ASSERT_TRUE(method != nullptr); EXPECT_FALSE(method->IsAnnotatedWithCriticalNative()); @@ -2255,8 +2253,7 @@ void JniCompilerTest::CriticalNativeImpl() { UpdateCurrentJni(JniKind::kCritical); ASSERT_TRUE(IsCurrentJniCritical()); - ScopedObjectAccess soa(Thread::Current()); - ArtMethod* method = soa.DecodeMethod(jmethod_); + ArtMethod* method = jni::DecodeArtMethod(jmethod_); ASSERT_TRUE(method != nullptr); EXPECT_TRUE(method->IsAnnotatedWithCriticalNative()); diff --git a/compiler/optimizing/bytecode_utils.h b/compiler/optimizing/bytecode_utils.h index 6dfffce117..133afa47fe 100644 --- a/compiler/optimizing/bytecode_utils.h +++ b/compiler/optimizing/bytecode_utils.h @@ -26,7 +26,8 @@ namespace art { class CodeItemIterator : public ValueObject { public: - CodeItemIterator(const DexFile::CodeItem& code_item, uint32_t start_dex_pc = 0u) + explicit CodeItemIterator(const DexFile::CodeItem& code_item) : CodeItemIterator(code_item, 0u) {} + CodeItemIterator(const DexFile::CodeItem& code_item, uint32_t start_dex_pc) : code_ptr_(code_item.insns_ + start_dex_pc), code_end_(code_item.insns_ + code_item.insns_size_in_code_units_), dex_pc_(start_dex_pc) {} diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 
b2bfe42548..25d3855e39 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -489,8 +489,14 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); - Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) - : locations->Out(); + Location arg0, arg1; + if (instruction_->IsInstanceOf()) { + arg0 = locations->InAt(1); + arg1 = locations->Out(); + } else { + arg0 = locations->InAt(0); + arg1 = locations->InAt(1); + } DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); @@ -504,26 +510,26 @@ class TypeCheckSlowPathARM : public SlowPathCodeARM { // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. InvokeRuntimeCallingConvention calling_convention; - codegen->EmitParallelMoves( - locations->InAt(1), - Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, - object_class, - Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot); - + codegen->EmitParallelMoves(arg0, + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + Primitive::kPrimNot, + arg1, + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + Primitive::kPrimNot); if (instruction_->IsInstanceOf()) { arm_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, instruction_->GetDexPc(), this); - CheckEntrypointTypes< - kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>(); + CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>(); arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0)); } else { DCHECK(instruction_->IsCheckCast()); - arm_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this); - 
CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>(); + arm_codegen->InvokeRuntime(kQuickCheckInstanceOf, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>(); } if (!is_fatal_) { @@ -5880,9 +5886,6 @@ HLoadString::LoadKind CodeGeneratorARM::GetSupportedLoadStringKind( break; case HLoadString::LoadKind::kBootImageAddress: break; - case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJitCompilation()); - break; case HLoadString::LoadKind::kBssEntry: DCHECK(!Runtime::Current()->UseJitCompilation()); break; @@ -6021,12 +6024,26 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) { CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>(); } -static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) { - return kEmitCompilerReadBarrier && - (kUseBakerReadBarrier || - type_check_kind == TypeCheckKind::kAbstractClassCheck || - type_check_kind == TypeCheckKind::kClassHierarchyCheck || - type_check_kind == TypeCheckKind::kArrayObjectCheck); +// Temp is used for read barrier. +static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { + if (kEmitCompilerReadBarrier && + (kUseBakerReadBarrier || + type_check_kind == TypeCheckKind::kAbstractClassCheck || + type_check_kind == TypeCheckKind::kClassHierarchyCheck || + type_check_kind == TypeCheckKind::kArrayObjectCheck)) { + return 1; + } + return 0; +} + +// InteraceCheck has 3 temps, one for holding the number of interfaces, one for the current +// interface pointer, one for loading the current interface. +// The other checks have one temp for loading the object's class. 
+static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) { + if (type_check_kind == TypeCheckKind::kInterfaceCheck) { + return 3; + } + return 1 + NumberOfInstanceOfTemps(type_check_kind); } void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) { @@ -6058,11 +6075,7 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) { // The "out" register is used as a temporary, so it overlaps with the inputs. // Note that TypeCheckSlowPathARM uses this register too. locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); - // When read barriers are enabled, we need a temporary register for - // some cases. - if (TypeCheckNeedsATemporary(type_check_kind)) { - locations->AddTemp(Location::RequiresRegister()); - } + locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind)); } void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { @@ -6073,9 +6086,9 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { Register cls = locations->InAt(1).AsRegister<Register>(); Location out_loc = locations->Out(); Register out = out_loc.AsRegister<Register>(); - Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ? - locations->GetTemp(0) : - Location::NoLocation(); + const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind); + DCHECK_LE(num_temps, 1u); + Location maybe_temp_loc = (num_temps >= 1) ? 
locations->GetTemp(0) : Location::NoLocation(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); @@ -6090,7 +6103,12 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { } // /* HeapReference<Class> */ out = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, maybe_temp_loc); + GenerateReferenceLoadTwoRegisters(instruction, + out_loc, + obj_loc, + class_offset, + maybe_temp_loc, + kEmitCompilerReadBarrier); switch (type_check_kind) { case TypeCheckKind::kExactCheck: { @@ -6242,13 +6260,7 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); - // Note that TypeCheckSlowPathARM uses this "temp" register too. - locations->AddTemp(Location::RequiresRegister()); - // When read barriers are enabled, we need an additional temporary - // register for some cases. - if (TypeCheckNeedsATemporary(type_check_kind)) { - locations->AddTemp(Location::RequiresRegister()); - } + locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind)); } void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { @@ -6259,13 +6271,18 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { Register cls = locations->InAt(1).AsRegister<Register>(); Location temp_loc = locations->GetTemp(0); Register temp = temp_loc.AsRegister<Register>(); - Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ? 
- locations->GetTemp(1) : - Location::NoLocation(); - uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); + const size_t num_temps = NumberOfCheckCastTemps(type_check_kind); + DCHECK_LE(num_temps, 3u); + Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation(); + Location maybe_temp3_loc = (num_temps >= 3) ? locations->GetTemp(2) : Location::NoLocation(); + const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); + const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); + const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); + const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value(); + const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value(); + const uint32_t object_array_data_offset = + mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); bool is_type_check_slow_path_fatal = (type_check_kind == TypeCheckKind::kExactCheck || @@ -6284,12 +6301,17 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { __ CompareAndBranchIfZero(obj, &done); } - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); - switch (type_check_kind) { case TypeCheckKind::kExactCheck: case TypeCheckKind::kArrayCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kEmitCompilerReadBarrier); + __ cmp(temp, ShifterOperand(cls)); // Jump to slow path for throwing the exception or 
doing a // more involved array check. @@ -6298,34 +6320,40 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { } case TypeCheckKind::kAbstractClassCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kEmitCompilerReadBarrier); + // If the class is abstract, we eagerly fetch the super class of the // object to avoid doing a comparison we know will fail. - Label loop, compare_classes; + Label loop; __ Bind(&loop); // /* HeapReference<Class> */ temp = temp->super_class_ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc); - // If the class reference currently in `temp` is not null, jump - // to the `compare_classes` label to compare it with the checked - // class. - __ CompareAndBranchIfNonZero(temp, &compare_classes); - // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters( - instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); - __ b(type_check_slow_path->GetEntryLabel()); + // If the class reference currently in `temp` is null, jump to the slow path to throw the + // exception. + __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel()); - __ Bind(&compare_classes); + // Otherwise, compare the classes. __ cmp(temp, ShifterOperand(cls)); __ b(&loop, NE); break; } case TypeCheckKind::kClassHierarchyCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kEmitCompilerReadBarrier); + // Walk over the class hierarchy to find a match. 
Label loop; __ Bind(&loop); @@ -6335,63 +6363,42 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { // /* HeapReference<Class> */ temp = temp->super_class_ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc); - // If the class reference currently in `temp` is not null, jump - // back at the beginning of the loop. - __ CompareAndBranchIfNonZero(temp, &loop); - // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters( - instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); - __ b(type_check_slow_path->GetEntryLabel()); + // If the class reference currently in `temp` is null, jump to the slow path to throw the + // exception. + __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel()); + // Otherwise, jump to the beginning of the loop. + __ b(&loop); break; } case TypeCheckKind::kArrayObjectCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kEmitCompilerReadBarrier); + // Do an exact check. - Label check_non_primitive_component_type; __ cmp(temp, ShifterOperand(cls)); __ b(&done, EQ); // Otherwise, we need to check that the object's class is a non-primitive array. // /* HeapReference<Class> */ temp = temp->component_type_ GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc); - - // If the component type is not null (i.e. the object is indeed - // an array), jump to label `check_non_primitive_component_type` - // to further check that this component type is not a primitive - // type. 
- __ CompareAndBranchIfNonZero(temp, &check_non_primitive_component_type); - // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters( - instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); - __ b(type_check_slow_path->GetEntryLabel()); - - __ Bind(&check_non_primitive_component_type); + // If the component type is null, jump to the slow path to throw the exception. + __ CompareAndBranchIfZero(temp, type_check_slow_path->GetEntryLabel()); + // Otherwise,the object is indeed an array, jump to label `check_non_primitive_component_type` + // to further check that this component type is not a primitive type. __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset); static_assert(Primitive::kPrimNot == 0, "Expected 0 for art::Primitive::kPrimNot"); - __ CompareAndBranchIfZero(temp, &done); - // Same comment as above regarding `temp` and the slow path. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters( - instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); - __ b(type_check_slow_path->GetEntryLabel()); + __ CompareAndBranchIfNonZero(temp, type_check_slow_path->GetEntryLabel()); break; } case TypeCheckKind::kUnresolvedCheck: - case TypeCheckKind::kInterfaceCheck: - // We always go into the type check slow path for the unresolved - // and interface check cases. - // + // We always go into the type check slow path for the unresolved check case. // We cannot directly call the CheckCast runtime entry point // without resorting to a type checking slow path here (i.e. 
by // calling InvokeRuntime directly), as it would require to @@ -6399,15 +6406,54 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { // instruction (following the runtime calling convention), which // might be cluttered by the potential first read barrier // emission at the beginning of this method. - // - // TODO: Introduce a new runtime entry point taking the object - // to test (instead of its class) as argument, and let it deal - // with the read barrier issues. This will let us refactor this - // case of the `switch` code as it was previously (with a direct - // call to the runtime not using a type checking slow path). - // This should also be beneficial for the other cases above. + __ b(type_check_slow_path->GetEntryLabel()); break; + + case TypeCheckKind::kInterfaceCheck: { + // Avoid read barriers to improve performance of the fast path. We can not get false + // positives by doing this. + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + /*emit_read_barrier*/ false); + + // /* HeapReference<Class> */ temp = temp->iftable_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + temp_loc, + iftable_offset, + maybe_temp2_loc, + /*emit_read_barrier*/ false); + Label is_null; + // Null iftable means it is empty and will always fail the check. + // Not cbz since the temp may not be a low register. + __ CompareAndBranchIfZero(temp, &is_null); + + // Loop through the iftable and check if any class matches. + __ ldr(maybe_temp2_loc.AsRegister<Register>(), Address(temp, array_length_offset)); + + Label start_loop; + __ Bind(&start_loop); + __ ldr(maybe_temp3_loc.AsRegister<Register>(), Address(temp, object_array_data_offset)); + __ MaybeUnpoisonHeapReference(maybe_temp3_loc.AsRegister<Register>()); + __ cmp(cls, ShifterOperand(maybe_temp3_loc.AsRegister<Register>())); + __ b(&done, EQ); // Return if same class. 
+ // Go to next interface. + __ add(temp, temp, ShifterOperand(2 * kHeapReferenceSize)); + __ sub(maybe_temp2_loc.AsRegister<Register>(), + maybe_temp2_loc.AsRegister<Register>(), + ShifterOperand(2)); + // Not cbnz since the temp may not be a low register. + __ CompareAndBranchIfNonZero(maybe_temp2_loc.AsRegister<Register>(), &start_loop); + __ Bind(&is_null); + + __ b(type_check_slow_path->GetEntryLabel()); + break; + } } __ Bind(&done); @@ -6715,10 +6761,12 @@ void InstructionCodeGeneratorARM::GenerateReferenceLoadTwoRegisters(HInstruction Location out, Location obj, uint32_t offset, - Location maybe_temp) { + Location maybe_temp, + bool emit_read_barrier) { Register out_reg = out.AsRegister<Register>(); Register obj_reg = obj.AsRegister<Register>(); - if (kEmitCompilerReadBarrier) { + if (emit_read_barrier) { + DCHECK(kEmitCompilerReadBarrier); if (kUseBakerReadBarrier) { DCHECK(maybe_temp.IsRegister()) << maybe_temp; // Load with fast path based Baker's read barrier. diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h index 3d46aab31f..6561984fe4 100644 --- a/compiler/optimizing/code_generator_arm.h +++ b/compiler/optimizing/code_generator_arm.h @@ -278,7 +278,8 @@ class InstructionCodeGeneratorARM : public InstructionCodeGenerator { Location out, Location obj, uint32_t offset, - Location maybe_temp); + Location maybe_temp, + bool emit_read_barrier); // Generate a GC root reference load: // // root <- *(obj + offset) diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 37322c68f1..c54e8e1130 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -459,9 +459,15 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); - Location class_to_check = locations->InAt(1); - Location object_class = 
instruction_->IsCheckCast() ? locations->GetTemp(0) - : locations->Out(); + Location arg0, arg1; + if (instruction_->IsInstanceOf()) { + arg0 = locations->InAt(1); + arg1 = locations->Out(); + } else { + arg0 = locations->InAt(0); + arg1 = locations->InAt(1); + } + DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen); @@ -476,21 +482,22 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 { // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. InvokeRuntimeCallingConvention calling_convention; - codegen->EmitParallelMoves( - class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot, - object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot); - + codegen->EmitParallelMoves(arg0, + LocationFrom(calling_convention.GetRegisterAt(0)), + Primitive::kPrimNot, + arg1, + LocationFrom(calling_convention.GetRegisterAt(1)), + Primitive::kPrimNot); if (instruction_->IsInstanceOf()) { arm64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this); - CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, - const mirror::Class*, const mirror::Class*>(); + CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>(); Primitive::Type ret_type = instruction_->GetType(); Location ret_loc = calling_convention.GetReturnLocation(ret_type); arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type); } else { DCHECK(instruction_->IsCheckCast()); - arm64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this); - CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>(); + arm64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this); + CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>(); } 
if (!is_fatal_) { @@ -3325,12 +3332,26 @@ void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* ins HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull()); } -static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) { - return kEmitCompilerReadBarrier && +// Temp is used for read barrier. +static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { + if (kEmitCompilerReadBarrier && (kUseBakerReadBarrier || - type_check_kind == TypeCheckKind::kAbstractClassCheck || - type_check_kind == TypeCheckKind::kClassHierarchyCheck || - type_check_kind == TypeCheckKind::kArrayObjectCheck); + type_check_kind == TypeCheckKind::kAbstractClassCheck || + type_check_kind == TypeCheckKind::kClassHierarchyCheck || + type_check_kind == TypeCheckKind::kArrayObjectCheck)) { + return 1; + } + return 0; +} + +// InteraceCheck has 3 temps, one for holding the number of interfaces, one for the current +// interface pointer, one for loading the current interface. +// The other checks have one temp for loading the object's class. +static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) { + if (type_check_kind == TypeCheckKind::kInterfaceCheck) { + return 3; + } + return 1 + NumberOfInstanceOfTemps(type_check_kind); } void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) { @@ -3362,11 +3383,8 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) { // The "out" register is used as a temporary, so it overlaps with the inputs. // Note that TypeCheckSlowPathARM64 uses this register too. locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); - // When read barriers are enabled, we need a temporary register for - // some cases. - if (TypeCheckNeedsATemporary(type_check_kind)) { - locations->AddTemp(Location::RequiresRegister()); - } + // Add temps if necessary for read barriers. 
+ locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind)); } void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { @@ -3377,9 +3395,9 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { Register cls = InputRegisterAt(instruction, 1); Location out_loc = locations->Out(); Register out = OutputRegister(instruction); - Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ? - locations->GetTemp(0) : - Location::NoLocation(); + const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind); + DCHECK_LE(num_temps, 1u); + Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); @@ -3395,7 +3413,12 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { } // /* HeapReference<Class> */ out = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, maybe_temp_loc); + GenerateReferenceLoadTwoRegisters(instruction, + out_loc, + obj_loc, + class_offset, + maybe_temp_loc, + kEmitCompilerReadBarrier); switch (type_check_kind) { case TypeCheckKind::kExactCheck: { @@ -3547,13 +3570,8 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RequiresRegister()); - // Note that TypeCheckSlowPathARM64 uses this "temp" register too. - locations->AddTemp(Location::RequiresRegister()); - // When read barriers are enabled, we need an additional temporary - // register for some cases. 
- if (TypeCheckNeedsATemporary(type_check_kind)) { - locations->AddTemp(Location::RequiresRegister()); - } + // Add temps for read barriers and other uses. One is used by TypeCheckSlowPathARM64. + locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind)); } void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { @@ -3562,15 +3580,21 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { Location obj_loc = locations->InAt(0); Register obj = InputRegisterAt(instruction, 0); Register cls = InputRegisterAt(instruction, 1); + const size_t num_temps = NumberOfCheckCastTemps(type_check_kind); + DCHECK_GE(num_temps, 1u); + DCHECK_LE(num_temps, 3u); Location temp_loc = locations->GetTemp(0); - Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ? - locations->GetTemp(1) : - Location::NoLocation(); + Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation(); + Location maybe_temp3_loc = (num_temps >= 3) ? 
locations->GetTemp(2) : Location::NoLocation(); Register temp = WRegisterFrom(temp_loc); - uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); + const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); + const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); + const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); + const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value(); + const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value(); + const uint32_t object_array_data_offset = + mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); bool is_type_check_slow_path_fatal = (type_check_kind == TypeCheckKind::kExactCheck || @@ -3589,12 +3613,17 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { __ Cbz(obj, &done); } - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); - switch (type_check_kind) { case TypeCheckKind::kExactCheck: case TypeCheckKind::kArrayCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kEmitCompilerReadBarrier); + __ Cmp(temp, cls); // Jump to slow path for throwing the exception or doing a // more involved array check. 
@@ -3603,34 +3632,39 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { } case TypeCheckKind::kAbstractClassCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kEmitCompilerReadBarrier); + // If the class is abstract, we eagerly fetch the super class of the // object to avoid doing a comparison we know will fail. - vixl::aarch64::Label loop, compare_classes; + vixl::aarch64::Label loop; __ Bind(&loop); // /* HeapReference<Class> */ temp = temp->super_class_ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc); - // If the class reference currently in `temp` is not null, jump - // to the `compare_classes` label to compare it with the checked - // class. - __ Cbnz(temp, &compare_classes); - // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters( - instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); - __ B(type_check_slow_path->GetEntryLabel()); - - __ Bind(&compare_classes); + // If the class reference currently in `temp` is null, jump to the slow path to throw the + // exception. + __ Cbz(temp, type_check_slow_path->GetEntryLabel()); + // Otherwise, compare classes. __ Cmp(temp, cls); __ B(ne, &loop); break; } case TypeCheckKind::kClassHierarchyCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kEmitCompilerReadBarrier); + // Walk over the class hierarchy to find a match. 
vixl::aarch64::Label loop; __ Bind(&loop); @@ -3644,20 +3678,20 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { // back at the beginning of the loop. __ Cbnz(temp, &loop); // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters( - instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); __ B(type_check_slow_path->GetEntryLabel()); break; } case TypeCheckKind::kArrayObjectCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + kEmitCompilerReadBarrier); + // Do an exact check. - vixl::aarch64::Label check_non_primitive_component_type; __ Cmp(temp, cls); __ B(eq, &done); @@ -3665,37 +3699,18 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { // /* HeapReference<Class> */ temp = temp->component_type_ GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc); - // If the component type is not null (i.e. the object is indeed - // an array), jump to label `check_non_primitive_component_type` - // to further check that this component type is not a primitive - // type. - __ Cbnz(temp, &check_non_primitive_component_type); - // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. 
- // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters( - instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); - __ B(type_check_slow_path->GetEntryLabel()); - - __ Bind(&check_non_primitive_component_type); + // If the component type is null, jump to the slow path to throw the exception. + __ Cbz(temp, type_check_slow_path->GetEntryLabel()); + // Otherwise, the object is indeed an array. Further check that this component type is not a + // primitive type. __ Ldrh(temp, HeapOperand(temp, primitive_offset)); static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot"); - __ Cbz(temp, &done); - // Same comment as above regarding `temp` and the slow path. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters( - instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc); - __ B(type_check_slow_path->GetEntryLabel()); + __ Cbnz(temp, type_check_slow_path->GetEntryLabel()); break; } case TypeCheckKind::kUnresolvedCheck: - case TypeCheckKind::kInterfaceCheck: - // We always go into the type check slow path for the unresolved - // and interface check cases. + // We always go into the type check slow path for the unresolved check cases. // // We cannot directly call the CheckCast runtime entry point // without resorting to a type checking slow path here (i.e. by @@ -3704,15 +3719,46 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) { // instruction (following the runtime calling convention), which // might be cluttered by the potential first read barrier // emission at the beginning of this method. - // - // TODO: Introduce a new runtime entry point taking the object - // to test (instead of its class) as argument, and let it deal - // with the read barrier issues. This will let us refactor this - // case of the `switch` code as it was previously (with a direct - // call to the runtime not using a type checking slow path). 
- // This should also be beneficial for the other cases above. __ B(type_check_slow_path->GetEntryLabel()); break; + case TypeCheckKind::kInterfaceCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + maybe_temp2_loc, + /*emit_read_barrier*/ false); + + // /* HeapReference<Class> */ temp = temp->iftable_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + temp_loc, + iftable_offset, + maybe_temp2_loc, + /*emit_read_barrier*/ false); + vixl::aarch64::Label is_null; + // Null iftable means it is empty and will always fail the check. + __ Cbz(temp, &is_null); + + // Loop through the iftable and check if any class matches. + __ Ldr(WRegisterFrom(maybe_temp2_loc), HeapOperand(temp.W(), array_length_offset)); + + vixl::aarch64::Label start_loop; + __ Bind(&start_loop); + __ Ldr(WRegisterFrom(maybe_temp3_loc), HeapOperand(temp.W(), object_array_data_offset)); + GetAssembler()->MaybeUnpoisonHeapReference(WRegisterFrom(maybe_temp3_loc)); + __ Cmp(cls, WRegisterFrom(maybe_temp3_loc)); + __ B(eq, &done); // Return if same class. + // Go to next interface. 
+ __ Add(temp, temp, 2 * kHeapReferenceSize); + __ Sub(WRegisterFrom(maybe_temp2_loc), WRegisterFrom(maybe_temp2_loc), 2); + __ Cbnz(WRegisterFrom(maybe_temp2_loc), &start_loop); + __ Bind(&is_null); + + __ B(type_check_slow_path->GetEntryLabel()); + break; + } } __ Bind(&done); @@ -4425,9 +4471,6 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind( break; case HLoadString::LoadKind::kBootImageAddress: break; - case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJitCompilation()); - break; case HLoadString::LoadKind::kBssEntry: DCHECK(!Runtime::Current()->UseJitCompilation()); break; @@ -5252,11 +5295,13 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(HInstructi Location out, Location obj, uint32_t offset, - Location maybe_temp) { + Location maybe_temp, + bool emit_read_barrier) { Primitive::Type type = Primitive::kPrimNot; Register out_reg = RegisterFrom(out, type); Register obj_reg = RegisterFrom(obj, type); - if (kEmitCompilerReadBarrier) { + if (emit_read_barrier) { + DCHECK(kEmitCompilerReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. 
Register temp_reg = RegisterFrom(maybe_temp, type); diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index 7f54b4b6b2..e8518f661b 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -284,7 +284,8 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator { Location out, Location obj, uint32_t offset, - Location maybe_temp); + Location maybe_temp, + bool emit_read_barrier); // Generate a GC root reference load: // // root <- *(obj + offset) diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 254d90898a..e7039e637d 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -443,8 +443,14 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); - Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) - : locations->Out(); + Location arg0, arg1; + if (instruction_->IsInstanceOf()) { + arg0 = locations->InAt(1); + arg1 = locations->Out(); + } else { + arg0 = locations->InAt(0); + arg1 = locations->InAt(1); + } DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); @@ -458,20 +464,22 @@ class TypeCheckSlowPathARMVIXL : public SlowPathCodeARMVIXL { // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. 
InvokeRuntimeCallingConventionARMVIXL calling_convention; - codegen->EmitParallelMoves( - locations->InAt(1), - LocationFrom(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, - object_class, - LocationFrom(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot); + codegen->EmitParallelMoves(arg0, + LocationFrom(calling_convention.GetRegisterAt(0)), + Primitive::kPrimNot, + arg1, + LocationFrom(calling_convention.GetRegisterAt(1)), + Primitive::kPrimNot); if (instruction_->IsInstanceOf()) { TODO_VIXL32(FATAL); } else { DCHECK(instruction_->IsCheckCast()); - arm_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this); - CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>(); + arm_codegen->InvokeRuntime(kQuickCheckInstanceOf, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>(); } if (!is_fatal_) { @@ -660,7 +668,7 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph, GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d15); } -#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()-> +#define __ reinterpret_cast<ArmVIXLAssembler*>(GetAssembler())->GetVIXLAssembler()-> // NOLINT void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) { GetAssembler()->FinalizeCode(); diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 12b1ab9abb..f19e2fec75 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -378,7 +378,14 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); - Location object_class = instruction_->IsCheckCast() ? 
locations->GetTemp(0) : locations->Out(); + Location arg0, arg1; + if (instruction_->IsInstanceOf()) { + arg0 = locations->InAt(1); + arg1 = locations->Out(); + } else { + arg0 = locations->InAt(0); + arg1 = locations->InAt(1); + } uint32_t dex_pc = instruction_->GetDexPc(); DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); @@ -390,24 +397,22 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS { // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. InvokeRuntimeCallingConvention calling_convention; - codegen->EmitParallelMoves(locations->InAt(1), + codegen->EmitParallelMoves(arg0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot, - object_class, + arg1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot); - if (instruction_->IsInstanceOf()) { mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this); - CheckEntrypointTypes< - kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>(); + CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>(); Primitive::Type ret_type = instruction_->GetType(); Location ret_loc = calling_convention.GetReturnLocation(ret_type); mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type); } else { DCHECK(instruction_->IsCheckCast()); - mips_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this); - CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>(); + mips_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this); + CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>(); } RestoreLiveRegisters(codegen, locations); @@ -5194,10 +5199,6 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind( break; case HLoadString::LoadKind::kBootImageAddress: break; - 
case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJitCompilation()); - fallback_load = false; - break; case HLoadString::LoadKind::kBssEntry: DCHECK(!Runtime::Current()->UseJitCompilation()); break; diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 010bf24232..7598740d3c 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -322,7 +322,15 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); - Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out(); + Location arg0, arg1; + if (instruction_->IsInstanceOf()) { + arg0 = locations->InAt(1); + arg1 = locations->Out(); + } else { + arg0 = locations->InAt(0); + arg1 = locations->InAt(1); + } + uint32_t dex_pc = instruction_->GetDexPc(); DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); @@ -334,24 +342,23 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 { // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. 
InvokeRuntimeCallingConvention calling_convention; - codegen->EmitParallelMoves(locations->InAt(1), + codegen->EmitParallelMoves(arg0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot, - object_class, + arg1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot); - if (instruction_->IsInstanceOf()) { mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this); CheckEntrypointTypes< - kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>(); + kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>(); Primitive::Type ret_type = instruction_->GetType(); Location ret_loc = calling_convention.GetReturnLocation(ret_type); mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type); } else { DCHECK(instruction_->IsCheckCast()); - mips64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this); - CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>(); + mips64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this); + CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>(); } RestoreLiveRegisters(codegen, locations); diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 2782d567ad..8c6580690b 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -312,8 +312,14 @@ class TypeCheckSlowPathX86 : public SlowPathCode { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); - Location object_class = instruction_->IsCheckCast() ? 
locations->GetTemp(0) - : locations->Out(); + Location arg0, arg1; + if (instruction_->IsInstanceOf()) { + arg0 = locations->InAt(1); + arg1 = locations->Out(); + } else { + arg0 = locations->InAt(0); + arg1 = locations->InAt(1); + } DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); @@ -327,25 +333,25 @@ class TypeCheckSlowPathX86 : public SlowPathCode { // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. InvokeRuntimeCallingConvention calling_convention; - x86_codegen->EmitParallelMoves( - locations->InAt(1), - Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, - object_class, - Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot); - + x86_codegen->EmitParallelMoves(arg0, + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + Primitive::kPrimNot, + arg1, + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + Primitive::kPrimNot); if (instruction_->IsInstanceOf()) { x86_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, instruction_->GetDexPc(), this); - CheckEntrypointTypes< - kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>(); + CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>(); } else { DCHECK(instruction_->IsCheckCast()); - x86_codegen->InvokeRuntime(kQuickCheckCast, instruction_, instruction_->GetDexPc(), this); - CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>(); + x86_codegen->InvokeRuntime(kQuickCheckInstanceOf, + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>(); } if (!is_fatal_) { @@ -6216,9 +6222,6 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind( break; case HLoadString::LoadKind::kBootImageAddress: break; - 
case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJitCompilation()); - break; case HLoadString::LoadKind::kDexCacheViaMethod: break; } @@ -6338,12 +6341,26 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) { CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>(); } -static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) { - return kEmitCompilerReadBarrier && +// Temp is used for read barrier. +static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { + if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier && (type_check_kind == TypeCheckKind::kAbstractClassCheck || type_check_kind == TypeCheckKind::kClassHierarchyCheck || - type_check_kind == TypeCheckKind::kArrayObjectCheck); + type_check_kind == TypeCheckKind::kArrayObjectCheck)) { + return 1; + } + return 0; +} + +// InteraceCheck has 3 temps, one for holding the number of interfaces, one for the current +// interface pointer, one for loading the current interface. +// The other checks have one temp for loading the object's class. +static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) { + if (type_check_kind == TypeCheckKind::kInterfaceCheck && !kPoisonHeapReferences) { + return 2; + } + return 1 + NumberOfInstanceOfTemps(type_check_kind); } void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) { @@ -6374,11 +6391,8 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) { locations->SetInAt(1, Location::Any()); // Note that TypeCheckSlowPathX86 uses this "out" register too. locations->SetOut(Location::RequiresRegister()); - // When read barriers are enabled, we need a temporary register for - // some cases. - if (TypeCheckNeedsATemporary(type_check_kind)) { - locations->AddTemp(Location::RequiresRegister()); - } + // When read barriers are enabled, we need a temporary register for some cases. 
+ locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind)); } void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { @@ -6389,9 +6403,9 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { Location cls = locations->InAt(1); Location out_loc = locations->Out(); Register out = out_loc.AsRegister<Register>(); - Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ? - locations->GetTemp(0) : - Location::NoLocation(); + const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind); + DCHECK_LE(num_temps, 1u); + Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); @@ -6407,7 +6421,11 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { } // /* HeapReference<Class> */ out = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset); + GenerateReferenceLoadTwoRegisters(instruction, + out_loc, + obj_loc, + class_offset, + kEmitCompilerReadBarrier); switch (type_check_kind) { case TypeCheckKind::kExactCheck: { @@ -6563,35 +6581,43 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { } } +static bool IsTypeCheckSlowPathFatal(TypeCheckKind type_check_kind, bool throws_into_catch) { + switch (type_check_kind) { + case TypeCheckKind::kExactCheck: + case TypeCheckKind::kAbstractClassCheck: + case TypeCheckKind::kClassHierarchyCheck: + case TypeCheckKind::kArrayObjectCheck: + return !throws_into_catch && !kEmitCompilerReadBarrier; + case TypeCheckKind::kInterfaceCheck: + return !throws_into_catch && !kEmitCompilerReadBarrier && !kPoisonHeapReferences; + case TypeCheckKind::kArrayCheck: + case TypeCheckKind::kUnresolvedCheck: + return false; + } + LOG(FATAL) 
<< "Unreachable"; + UNREACHABLE(); +} + void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) { - LocationSummary::CallKind call_kind = LocationSummary::kNoCall; bool throws_into_catch = instruction->CanThrowIntoCatchBlock(); TypeCheckKind type_check_kind = instruction->GetTypeCheckKind(); - switch (type_check_kind) { - case TypeCheckKind::kExactCheck: - case TypeCheckKind::kAbstractClassCheck: - case TypeCheckKind::kClassHierarchyCheck: - case TypeCheckKind::kArrayObjectCheck: - call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ? - LocationSummary::kCallOnSlowPath : - LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path. - break; - case TypeCheckKind::kArrayCheck: - case TypeCheckKind::kUnresolvedCheck: - case TypeCheckKind::kInterfaceCheck: - call_kind = LocationSummary::kCallOnSlowPath; - break; - } + LocationSummary::CallKind call_kind = + IsTypeCheckSlowPathFatal(type_check_kind, throws_into_catch) + ? LocationSummary::kNoCall + : LocationSummary::kCallOnSlowPath; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind); locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::Any()); + if (type_check_kind == TypeCheckKind::kInterfaceCheck) { + // Require a register for the interface check since there is a loop that compares the class to + // a memory address. + locations->SetInAt(1, Location::RequiresRegister()); + } else { + locations->SetInAt(1, Location::Any()); + } // Note that TypeCheckSlowPathX86 uses this "temp" register too. locations->AddTemp(Location::RequiresRegister()); - // When read barriers are enabled, we need an additional temporary - // register for some cases. - if (TypeCheckNeedsATemporary(type_check_kind)) { - locations->AddTemp(Location::RequiresRegister()); - } + // When read barriers are enabled, we need an additional temporary register for some cases. 
+ locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind)); } void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { @@ -6602,20 +6628,22 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { Location cls = locations->InAt(1); Location temp_loc = locations->GetTemp(0); Register temp = temp_loc.AsRegister<Register>(); - Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ? - locations->GetTemp(1) : - Location::NoLocation(); - uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); - uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); - uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); - uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); + const size_t num_temps = NumberOfCheckCastTemps(type_check_kind); + DCHECK_GE(num_temps, 1u); + DCHECK_LE(num_temps, 2u); + Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation(); + const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); + const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); + const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); + const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); + const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value(); + const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value(); + const uint32_t object_array_data_offset = + mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); bool is_type_check_slow_path_fatal = - (type_check_kind == TypeCheckKind::kExactCheck || - type_check_kind == TypeCheckKind::kAbstractClassCheck || - type_check_kind == TypeCheckKind::kClassHierarchyCheck || - type_check_kind == TypeCheckKind::kArrayObjectCheck) && - !instruction->CanThrowIntoCatchBlock(); + IsTypeCheckSlowPathFatal(type_check_kind, 
instruction->CanThrowIntoCatchBlock()); + SlowPathCode* type_check_slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(instruction, is_type_check_slow_path_fatal); @@ -6628,12 +6656,16 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { __ j(kEqual, &done); } - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset); - switch (type_check_kind) { case TypeCheckKind::kExactCheck: case TypeCheckKind::kArrayCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + kEmitCompilerReadBarrier); + if (cls.IsRegister()) { __ cmpl(temp, cls.AsRegister<Register>()); } else { @@ -6647,28 +6679,26 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { } case TypeCheckKind::kAbstractClassCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + kEmitCompilerReadBarrier); + // If the class is abstract, we eagerly fetch the super class of the // object to avoid doing a comparison we know will fail. - NearLabel loop, compare_classes; + NearLabel loop; __ Bind(&loop); // /* HeapReference<Class> */ temp = temp->super_class_ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc); - // If the class reference currently in `temp` is not null, jump - // to the `compare_classes` label to compare it with the checked - // class. + // If the class reference currently in `temp` is null, jump to the slow path to throw the + // exception. __ testl(temp, temp); - __ j(kNotEqual, &compare_classes); - // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. 
- // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset); - __ jmp(type_check_slow_path->GetEntryLabel()); + __ j(kZero, type_check_slow_path->GetEntryLabel()); - __ Bind(&compare_classes); + // Otherwise, compare the classes if (cls.IsRegister()) { __ cmpl(temp, cls.AsRegister<Register>()); } else { @@ -6680,6 +6710,13 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { } case TypeCheckKind::kClassHierarchyCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + kEmitCompilerReadBarrier); + // Walk over the class hierarchy to find a match. NearLabel loop; __ Bind(&loop); @@ -6697,21 +6734,21 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { // If the class reference currently in `temp` is not null, jump // back at the beginning of the loop. __ testl(temp, temp); - __ j(kNotEqual, &loop); - // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset); + __ j(kNotZero, &loop); + // Otherwise, jump to the slow path to throw the exception.; __ jmp(type_check_slow_path->GetEntryLabel()); break; } case TypeCheckKind::kArrayObjectCheck: { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + kEmitCompilerReadBarrier); + // Do an exact check. 
- NearLabel check_non_primitive_component_type; if (cls.IsRegister()) { __ cmpl(temp, cls.AsRegister<Register>()); } else { @@ -6724,36 +6761,18 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { // /* HeapReference<Class> */ temp = temp->component_type_ GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc); - // If the component type is not null (i.e. the object is indeed - // an array), jump to label `check_non_primitive_component_type` - // to further check that this component type is not a primitive - // type. + // If the component type is null (i.e. the object not an array), jump to the slow path to + // throw the exception. Otherwise proceed with the check. __ testl(temp, temp); - __ j(kNotEqual, &check_non_primitive_component_type); - // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset); - __ jmp(type_check_slow_path->GetEntryLabel()); + __ j(kZero, type_check_slow_path->GetEntryLabel()); - __ Bind(&check_non_primitive_component_type); __ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot)); - __ j(kEqual, &done); - // Same comment as above regarding `temp` and the slow path. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset); - __ jmp(type_check_slow_path->GetEntryLabel()); + __ j(kNotEqual, type_check_slow_path->GetEntryLabel()); break; } case TypeCheckKind::kUnresolvedCheck: - case TypeCheckKind::kInterfaceCheck: - // We always go into the type check slow path for the unresolved - // and interface check cases. - // + // We always go into the type check slow path for the unresolved check case. 
// We cannot directly call the CheckCast runtime entry point // without resorting to a type checking slow path here (i.e. by // calling InvokeRuntime directly), as it would require to @@ -6761,15 +6780,59 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) { // instruction (following the runtime calling convention), which // might be cluttered by the potential first read barrier // emission at the beginning of this method. - // - // TODO: Introduce a new runtime entry point taking the object - // to test (instead of its class) as argument, and let it deal - // with the read barrier issues. This will let us refactor this - // case of the `switch` code as it was previously (with a direct - // call to the runtime not using a type checking slow path). - // This should also be beneficial for the other cases above. __ jmp(type_check_slow_path->GetEntryLabel()); break; + + case TypeCheckKind::kInterfaceCheck: { + // Fast path for the interface check. Since we compare with a memory location in the inner + // loop we would need to have cls poisoned. However unpoisoning cls would reset the + // conditional flags and cause the conditional jump to be incorrect. Therefore we just jump + // to the slow path if we are running under poisoning + if (!kPoisonHeapReferences) { + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + /*emit_read_barrier*/ false); + + // Try to avoid read barriers to improve the fast path. We can not get false positives by + // doing this. + // /* HeapReference<Class> */ temp = obj->klass_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + obj_loc, + class_offset, + /*emit_read_barrier*/ false); + + // /* HeapReference<Class> */ temp = temp->iftable_ + GenerateReferenceLoadTwoRegisters(instruction, + temp_loc, + temp_loc, + iftable_offset, + /*emit_read_barrier*/ false); + NearLabel is_null; + // Null iftable means it is empty. 
+ __ testl(temp, temp); + __ j(kZero, &is_null); + + // Loop through the iftable and check if any class matches. + __ movl(maybe_temp2_loc.AsRegister<Register>(), Address(temp, array_length_offset)); + + NearLabel start_loop; + __ Bind(&start_loop); + __ cmpl(cls.AsRegister<Register>(), Address(temp, object_array_data_offset)); + __ j(kEqual, &done); // Return if same class. + // Go to next interface. + __ addl(temp, Immediate(2 * kHeapReferenceSize)); + __ subl(maybe_temp2_loc.AsRegister<Register>(), Immediate(2)); + __ j(kNotZero, &start_loop); + __ Bind(&is_null); + } + + __ jmp(type_check_slow_path->GetEntryLabel()); + break; + } } __ Bind(&done); @@ -6961,10 +7024,12 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister(HInstruction* void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters(HInstruction* instruction, Location out, Location obj, - uint32_t offset) { + uint32_t offset, + bool emit_read_barrier) { Register out_reg = out.AsRegister<Register>(); Register obj_reg = obj.AsRegister<Register>(); - if (kEmitCompilerReadBarrier) { + if (emit_read_barrier) { + DCHECK(kEmitCompilerReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. 
// /* HeapReference<Object> */ out = *(obj + offset) diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h index 1b51999546..d2249023e1 100644 --- a/compiler/optimizing/code_generator_x86.h +++ b/compiler/optimizing/code_generator_x86.h @@ -254,7 +254,8 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator { void GenerateReferenceLoadTwoRegisters(HInstruction* instruction, Location out, Location obj, - uint32_t offset); + uint32_t offset, + bool emit_read_barrier); // Generate a GC root reference load: // // root <- *address diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index a3327e261c..524cd5b21d 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -332,8 +332,14 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); - Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) - : locations->Out(); + Location arg0, arg1; + if (instruction_->IsInstanceOf()) { + arg0 = locations->InAt(1); + arg1 = locations->Out(); + } else { + arg0 = locations->InAt(0); + arg1 = locations->InAt(1); + } uint32_t dex_pc = instruction_->GetDexPc(); DCHECK(instruction_->IsCheckCast() || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg())); @@ -348,22 +354,19 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode { // We're moving two locations to locations that could overlap, so we need a parallel // move resolver. 
InvokeRuntimeCallingConvention calling_convention; - codegen->EmitParallelMoves( - locations->InAt(1), - Location::RegisterLocation(calling_convention.GetRegisterAt(0)), - Primitive::kPrimNot, - object_class, - Location::RegisterLocation(calling_convention.GetRegisterAt(1)), - Primitive::kPrimNot); - + codegen->EmitParallelMoves(arg0, + Location::RegisterLocation(calling_convention.GetRegisterAt(0)), + Primitive::kPrimNot, + arg1, + Location::RegisterLocation(calling_convention.GetRegisterAt(1)), + Primitive::kPrimNot); if (instruction_->IsInstanceOf()) { x86_64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this); - CheckEntrypointTypes< - kQuickInstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*>(); + CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*>(); } else { DCHECK(instruction_->IsCheckCast()); - x86_64_codegen->InvokeRuntime(kQuickCheckCast, instruction_, dex_pc, this); - CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>(); + x86_64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this); + CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>(); } if (!is_fatal_) { @@ -5628,9 +5631,6 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind( break; case HLoadString::LoadKind::kBootImageAddress: break; - case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJitCompilation()); - break; case HLoadString::LoadKind::kBssEntry: DCHECK(!Runtime::Current()->UseJitCompilation()); break; @@ -5990,7 +5990,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) { } } -bool IsTypeCheckSlowPathFatal(TypeCheckKind type_check_kind, bool throws_into_catch) { +static bool IsTypeCheckSlowPathFatal(TypeCheckKind type_check_kind, bool throws_into_catch) { switch (type_check_kind) { case TypeCheckKind::kExactCheck: case 
TypeCheckKind::kAbstractClassCheck: @@ -6050,7 +6050,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value(); const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value(); - const int object_array_data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); + const uint32_t object_array_data_offset = + mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); bool is_type_check_slow_path_fatal = IsTypeCheckSlowPathFatal(type_check_kind, instruction->CanThrowIntoCatchBlock()); @@ -6104,30 +6105,16 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { kEmitCompilerReadBarrier); // If the class is abstract, we eagerly fetch the super class of the // object to avoid doing a comparison we know will fail. - NearLabel loop, compare_classes; + NearLabel loop; __ Bind(&loop); // /* HeapReference<Class> */ temp = temp->super_class_ GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc); - // If the class reference currently in `temp` is not null, jump - // to the `compare_classes` label to compare it with the checked - // class. + // If the class reference currently in `temp` is null, jump to the slow path to throw the + // exception. __ testl(temp, temp); - __ j(kNotEqual, &compare_classes); - // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, - temp_loc, - obj_loc, - class_offset, - kEmitCompilerReadBarrier); - __ jmp(type_check_slow_path->GetEntryLabel()); - - __ Bind(&compare_classes); + // Otherwise, compare the classes. 
+ __ j(kZero, type_check_slow_path->GetEntryLabel()); if (cls.IsRegister()) { __ cmpl(temp, cls.AsRegister<CpuRegister>()); } else { @@ -6170,18 +6157,8 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { // If the class reference currently in `temp` is not null, jump // back at the beginning of the loop. __ testl(temp, temp); - __ j(kNotEqual, &loop); + __ j(kNotZero, &loop); // Otherwise, jump to the slow path to throw the exception. - // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, - temp_loc, - obj_loc, - class_offset, - kEmitCompilerReadBarrier); __ jmp(type_check_slow_path->GetEntryLabel()); __ Bind(&done); break; @@ -6224,36 +6201,36 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { // to further check that this component type is not a primitive // type. __ testl(temp, temp); - __ j(kNotEqual, &check_non_primitive_component_type); // Otherwise, jump to the slow path to throw the exception. + __ j(kZero, type_check_slow_path->GetEntryLabel()); + __ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot)); + __ j(kNotEqual, type_check_slow_path->GetEntryLabel()); + __ Bind(&done); + break; + } + + case TypeCheckKind::kUnresolvedCheck: { + // We always go into the type check slow path for the unresolved case. // - // But before, move back the object's class into `temp` before - // going into the slow path, as it has been overwritten in the - // meantime. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, - temp_loc, - obj_loc, - class_offset, - kEmitCompilerReadBarrier); - __ jmp(type_check_slow_path->GetEntryLabel()); + // We cannot directly call the CheckCast runtime entry point + // without resorting to a type checking slow path here (i.e. 
by + // calling InvokeRuntime directly), as it would require to + // assign fixed registers for the inputs of this HInstanceOf + // instruction (following the runtime calling convention), which + // might be cluttered by the potential first read barrier + // emission at the beginning of this method. - __ Bind(&check_non_primitive_component_type); - __ cmpw(Address(temp, primitive_offset), Immediate(Primitive::kPrimNot)); - __ j(kEqual, &done); - // Same comment as above regarding `temp` and the slow path. - // /* HeapReference<Class> */ temp = obj->klass_ - GenerateReferenceLoadTwoRegisters(instruction, - temp_loc, - obj_loc, - class_offset, - kEmitCompilerReadBarrier); + NearLabel done; + // Avoid null check if we know obj is not null. + if (instruction->MustDoNullCheck()) { + __ testl(obj, obj); + __ j(kEqual, &done); + } __ jmp(type_check_slow_path->GetEntryLabel()); __ Bind(&done); break; } - case TypeCheckKind::kUnresolvedCheck: case TypeCheckKind::kInterfaceCheck: NearLabel done; @@ -6263,28 +6240,9 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { __ j(kEqual, &done); } - - // We always go into the type check slow path for the unresolved case. - // - // We cannot directly call the CheckCast runtime entry point - // without resorting to a type checking slow path here (i.e. by - // calling InvokeRuntime directly), as it would require to - // assign fixed registers for the inputs of this HInstanceOf - // instruction (following the runtime calling convention), which - // might be cluttered by the potential first read barrier - // emission at the beginning of this method. - // - // TODO: Introduce a new runtime entry point taking the object - // to test (instead of its class) as argument, and let it deal - // with the read barrier issues. This will let us refactor this - // case of the `switch` code as it was previously (with a direct - // call to the runtime not using a type checking slow path). 
- // This should also be beneficial for the other cases above. - - // Fast path for the interface check. Since we compare with a memory location in the inner - // loop we would need to have cls poisoned. However unpoisoning cls would reset the - // conditional flags and cause the conditional jump to be incorrect. - if (type_check_kind == TypeCheckKind::kInterfaceCheck && !kPoisonHeapReferences) { + // Fast path for the interface check. We always go slow path for heap poisoning since + // unpoisoning cls would require an extra temp. + if (!kPoisonHeapReferences) { // Try to avoid read barriers to improve the fast path. We can not get false positives by // doing this. // /* HeapReference<Class> */ temp = obj->klass_ @@ -6302,31 +6260,22 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { /*emit_read_barrier*/ false); NearLabel is_null; // Null iftable means it is empty. - __ testl(temp_loc.AsRegister<CpuRegister>(), temp_loc.AsRegister<CpuRegister>()); + __ testl(temp, temp); __ j(kZero, &is_null); // Loop through the iftable and check if any class matches. - __ movl(maybe_temp2_loc.AsRegister<CpuRegister>(), - Address(temp_loc.AsRegister<CpuRegister>(), array_length_offset)); + __ movl(maybe_temp2_loc.AsRegister<CpuRegister>(), Address(temp, array_length_offset)); NearLabel start_loop; __ Bind(&start_loop); - __ cmpl(cls.AsRegister<CpuRegister>(), - Address(temp_loc.AsRegister<CpuRegister>(), object_array_data_offset)); + __ cmpl(cls.AsRegister<CpuRegister>(), Address(temp, object_array_data_offset)); __ j(kEqual, &done); // Return if same class. // Go to next interface. 
- __ addq(temp_loc.AsRegister<CpuRegister>(), Immediate(2 * kHeapReferenceSize)); - __ subq(maybe_temp2_loc.AsRegister<CpuRegister>(), Immediate(2)); + __ addl(temp, Immediate(2 * kHeapReferenceSize)); + __ subl(maybe_temp2_loc.AsRegister<CpuRegister>(), Immediate(2)); __ j(kNotZero, &start_loop); __ Bind(&is_null); } - - // Since we clobbered temp_loc holding the class, we need to reload it. - GenerateReferenceLoadTwoRegisters(instruction, - temp_loc, - obj_loc, - class_offset, - kEmitCompilerReadBarrier); __ jmp(type_check_slow_path->GetEntryLabel()); __ Bind(&done); break; diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index 9ec32df578..ac83bd9b0c 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -259,7 +259,7 @@ static void ValidateGraph(HGraph* graph) { GraphChecker graph_checker(graph); graph_checker.Run(); if (!graph_checker.IsValid()) { - for (auto error : graph_checker.GetErrors()) { + for (const auto& error : graph_checker.GetErrors()) { std::cout << error << std::endl; } } @@ -269,7 +269,7 @@ static void ValidateGraph(HGraph* graph) { template <typename Expected> static void RunCodeNoCheck(CodeGenerator* codegen, HGraph* graph, - std::function<void(HGraph*)> hook_before_codegen, + const std::function<void(HGraph*)>& hook_before_codegen, bool has_result, Expected expected) { SsaLivenessAnalysis liveness(graph, codegen); diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h index e10b1d6b2e..05c6df4a93 100644 --- a/compiler/optimizing/constant_folding.h +++ b/compiler/optimizing/constant_folding.h @@ -39,8 +39,7 @@ namespace art { */ class HConstantFolding : public HOptimization { public: - HConstantFolding(HGraph* graph, const char* name = kConstantFoldingPassName) - : HOptimization(graph, name) {} + HConstantFolding(HGraph* graph, const char* name) : HOptimization(graph, name) {} void Run() OVERRIDE; diff --git 
a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc index d1a2a2649a..5fac3acb8a 100644 --- a/compiler/optimizing/constant_folding_test.cc +++ b/compiler/optimizing/constant_folding_test.cc @@ -42,7 +42,7 @@ class ConstantFoldingTest : public CommonCompilerTest { const std::string& expected_before, const std::string& expected_after_cf, const std::string& expected_after_dce, - std::function<void(HGraph*)> check_after_cf, + const std::function<void(HGraph*)>& check_after_cf, Primitive::Type return_type = Primitive::kPrimInt) { graph_ = CreateCFG(&allocator_, data, return_type); TestCodeOnReadyGraph(expected_before, @@ -54,7 +54,7 @@ class ConstantFoldingTest : public CommonCompilerTest { void TestCodeOnReadyGraph(const std::string& expected_before, const std::string& expected_after_cf, const std::string& expected_after_dce, - std::function<void(HGraph*)> check_after_cf) { + const std::function<void(HGraph*)>& check_after_cf) { ASSERT_NE(graph_, nullptr); StringPrettyPrinter printer_before(graph_); @@ -65,7 +65,7 @@ class ConstantFoldingTest : public CommonCompilerTest { std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegenX86(graph_, *features_x86.get(), CompilerOptions()); - HConstantFolding(graph_).Run(); + HConstantFolding(graph_, "constant_folding").Run(); GraphChecker graph_checker_cf(graph_); graph_checker_cf.Run(); ASSERT_TRUE(graph_checker_cf.IsValid()); @@ -77,7 +77,7 @@ class ConstantFoldingTest : public CommonCompilerTest { check_after_cf(graph_); - HDeadCodeElimination(graph_).Run(); + HDeadCodeElimination(graph_, nullptr /* stats */, "dead_code_elimination").Run(); GraphChecker graph_checker_dce(graph_); graph_checker_dce.Run(); ASSERT_TRUE(graph_checker_dce.IsValid()); diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h index 58e700deba..84fd890eee 100644 --- 
a/compiler/optimizing/dead_code_elimination.h +++ b/compiler/optimizing/dead_code_elimination.h @@ -29,9 +29,7 @@ namespace art { */ class HDeadCodeElimination : public HOptimization { public: - HDeadCodeElimination(HGraph* graph, - OptimizingCompilerStats* stats = nullptr, - const char* name = kDeadCodeEliminationPassName) + HDeadCodeElimination(HGraph* graph, OptimizingCompilerStats* stats, const char* name) : HOptimization(graph, name, stats) {} void Run() OVERRIDE; diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc index fe52aacef7..fdd77e7261 100644 --- a/compiler/optimizing/dead_code_elimination_test.cc +++ b/compiler/optimizing/dead_code_elimination_test.cc @@ -44,7 +44,7 @@ static void TestCode(const uint16_t* data, std::unique_ptr<const X86InstructionSetFeatures> features_x86( X86InstructionSetFeatures::FromCppDefines()); x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions()); - HDeadCodeElimination(graph).Run(); + HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run(); GraphChecker graph_checker(graph); graph_checker.Run(); ASSERT_TRUE(graph_checker.IsValid()); diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index cc420b3260..9e816237dd 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -1315,8 +1315,8 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph, const DexCompilationUnit& dex_compilation_unit) { // Note: if the outermost_graph_ is being compiled OSR, we should not run any // optimization that could lead to a HDeoptimize. The following optimizations do not. 
- HDeadCodeElimination dce(callee_graph, stats_); - HConstantFolding fold(callee_graph); + HDeadCodeElimination dce(callee_graph, stats_, "dead_code_elimination$inliner"); + HConstantFolding fold(callee_graph, "constant_folding$inliner"); HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_); InstructionSimplifier simplify(callee_graph, stats_); IntrinsicsRecognizer intrinsics(callee_graph, stats_); diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index e06fdee370..85b461dcf6 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -106,6 +106,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor { void SimplifyFP2Int(HInvoke* invoke); void SimplifyStringCharAt(HInvoke* invoke); void SimplifyStringIsEmptyOrLength(HInvoke* invoke); + void SimplifyNPEOnArgN(HInvoke* invoke, size_t); void SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind); OptimizingCompilerStats* stats_; @@ -1858,6 +1859,16 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, replacement); } +// This method should only be used on intrinsics whose sole way of throwing an +// exception is raising a NPE when the nth argument is null. If that argument +// is provably non-null, we can clear the flag. 
+void InstructionSimplifierVisitor::SimplifyNPEOnArgN(HInvoke* invoke, size_t n) { + HInstruction* arg = invoke->InputAt(n); + if (!arg->CanBeNull()) { + invoke->SetCanThrow(false); + } +} + void InstructionSimplifierVisitor::SimplifyMemBarrier(HInvoke* invoke, MemBarrierKind barrier_kind) { uint32_t dex_pc = invoke->GetDexPc(); HMemoryBarrier* mem_barrier = new (GetGraph()->GetArena()) HMemoryBarrier(barrier_kind, dex_pc); @@ -1911,6 +1922,10 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) { case Intrinsics::kStringLength: SimplifyStringIsEmptyOrLength(instruction); break; + case Intrinsics::kStringStringIndexOf: + case Intrinsics::kStringStringIndexOfAfter: + SimplifyNPEOnArgN(instruction, 1); // 0th has own NullCheck + break; case Intrinsics::kUnsafeLoadFence: SimplifyMemBarrier(instruction, MemBarrierKind::kLoadAny); break; diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index 3e6b0afb3f..8234b2467d 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -2611,6 +2611,9 @@ UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit) UNIMPLEMENTED_INTRINSIC(ARM, LongLowestOneBit) +UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOf); +UNIMPLEMENTED_INTRINSIC(ARM, StringStringIndexOfAfter); + // 1.8. UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddInt) UNIMPLEMENTED_INTRINSIC(ARM, UnsafeGetAndAddLong) diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 9b17931d74..451abc56d3 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -2779,6 +2779,9 @@ UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit) UNIMPLEMENTED_INTRINSIC(ARM64, LongLowestOneBit) +UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf); +UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOfAfter); + // 1.8. 
UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddInt) UNIMPLEMENTED_INTRINSIC(ARM64, UnsafeGetAndAddLong) diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index 1450b5a490..e4bef3446c 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -2699,6 +2699,9 @@ UNIMPLEMENTED_INTRINSIC(ARMVIXL, LongHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARMVIXL, IntegerLowestOneBit) UNIMPLEMENTED_INTRINSIC(ARMVIXL, LongLowestOneBit) +UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf); +UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter); + // 1.8. UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddInt) UNIMPLEMENTED_INTRINSIC(ARMVIXL, UnsafeGetAndAddLong) diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc index 5239f8f020..7c81588cda 100644 --- a/compiler/optimizing/intrinsics_mips.cc +++ b/compiler/optimizing/intrinsics_mips.cc @@ -2495,6 +2495,9 @@ UNIMPLEMENTED_INTRINSIC(MIPS, MathSinh) UNIMPLEMENTED_INTRINSIC(MIPS, MathTan) UNIMPLEMENTED_INTRINSIC(MIPS, MathTanh) +UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf); +UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter); + // 1.8. UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddInt) UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddLong) diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index 1d153e2e18..2d4f417b14 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -1947,6 +1947,9 @@ UNIMPLEMENTED_INTRINSIC(MIPS64, MathSinh) UNIMPLEMENTED_INTRINSIC(MIPS64, MathTan) UNIMPLEMENTED_INTRINSIC(MIPS64, MathTanh) +UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf); +UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter); + // 1.8. 
UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddInt) UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddLong) diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 80b4df62a9..06ab46f536 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -3329,6 +3329,9 @@ UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit) UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit) UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit) +UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf); +UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter); + // 1.8. UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddInt) UNIMPLEMENTED_INTRINSIC(X86, UnsafeGetAndAddLong) diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 7ecf4ba6c4..2ea8670100 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -2998,6 +2998,9 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) { UNIMPLEMENTED_INTRINSIC(X86_64, FloatIsInfinite) UNIMPLEMENTED_INTRINSIC(X86_64, DoubleIsInfinite) +UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf); +UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter); + // 1.8. 
UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddInt) UNIMPLEMENTED_INTRINSIC(X86_64, UnsafeGetAndAddLong) diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc index 5b2cbf783d..15e605971e 100644 --- a/compiler/optimizing/load_store_elimination.cc +++ b/compiler/optimizing/load_store_elimination.cc @@ -33,11 +33,11 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> { public: ReferenceInfo(HInstruction* reference, size_t pos) : reference_(reference), position_(pos) { is_singleton_ = true; - is_singleton_and_not_returned_ = true; + is_singleton_and_non_escaping_ = true; if (!reference_->IsNewInstance() && !reference_->IsNewArray()) { // For references not allocated in the method, don't assume anything. is_singleton_ = false; - is_singleton_and_not_returned_ = false; + is_singleton_and_non_escaping_ = false; return; } @@ -50,7 +50,7 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> { // BoundType shouldn't normally be necessary for a NewInstance. // Just be conservative for the uncommon cases. is_singleton_ = false; - is_singleton_and_not_returned_ = false; + is_singleton_and_non_escaping_ = false; return; } if (user->IsPhi() || user->IsSelect() || user->IsInvoke() || @@ -62,21 +62,37 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> { // reference_ is merged to HPhi/HSelect, passed to a callee, or stored to heap. // reference_ isn't the only name that can refer to its value anymore. is_singleton_ = false; - is_singleton_and_not_returned_ = false; + is_singleton_and_non_escaping_ = false; return; } if ((user->IsUnresolvedInstanceFieldGet() && (reference_ == user->InputAt(0))) || (user->IsUnresolvedInstanceFieldSet() && (reference_ == user->InputAt(0)))) { - // The field is accessed in an unresolved way. We mark the object as a singleton to - // disable load/store optimizations on it. + // The field is accessed in an unresolved way. 
We mark the object as a non-singleton + // to disable load/store optimizations on it. // Note that we could optimize this case and still perform some optimizations until // we hit the unresolved access, but disabling is the simplest. is_singleton_ = false; - is_singleton_and_not_returned_ = false; + is_singleton_and_non_escaping_ = false; return; } if (user->IsReturn()) { - is_singleton_and_not_returned_ = false; + is_singleton_and_non_escaping_ = false; + } + } + + if (!is_singleton_ || !is_singleton_and_non_escaping_) { + return; + } + + // Look at Environment uses and if it's for HDeoptimize, it's treated the same + // as a return which escapes at the end of executing the compiled code. We don't + // do store elimination for singletons that escape through HDeoptimize. + // Other Environment uses are fine since LSE is disabled for debuggable. + for (const HUseListNode<HEnvironment*>& use : reference_->GetEnvUses()) { + HEnvironment* user = use.GetUser(); + if (user->GetHolder()->IsDeoptimize()) { + is_singleton_and_non_escaping_ = false; + break; } } } @@ -96,17 +112,22 @@ class ReferenceInfo : public ArenaObject<kArenaAllocMisc> { return is_singleton_; } - // Returns true if reference_ is a singleton and not returned to the caller. + // Returns true if reference_ is a singleton and not returned to the caller or + // used as an environment local of an HDeoptimize instruction. // The allocation and stores into reference_ may be eliminated for such cases. - bool IsSingletonAndNotReturned() const { - return is_singleton_and_not_returned_; + bool IsSingletonAndNonEscaping() const { + return is_singleton_and_non_escaping_; } private: HInstruction* const reference_; const size_t position_; // position in HeapLocationCollector's ref_info_array_. bool is_singleton_; // can only be referred to by a single name in the method. - bool is_singleton_and_not_returned_; // reference_ is singleton and not returned to caller. 
+ + // reference_ is singleton and does not escape in the end either by + // returning to the caller, or being used as an environment local of an + // HDeoptimize instruction. + bool is_singleton_and_non_escaping_; DISALLOW_COPY_AND_ASSIGN(ReferenceInfo); }; @@ -202,8 +223,7 @@ class HeapLocationCollector : public HGraphVisitor { kArenaAllocLSE), has_heap_stores_(false), has_volatile_(false), - has_monitor_operations_(false), - may_deoptimize_(false) {} + has_monitor_operations_(false) {} size_t GetNumberOfHeapLocations() const { return heap_locations_.size(); @@ -236,13 +256,6 @@ class HeapLocationCollector : public HGraphVisitor { return has_monitor_operations_; } - // Returns whether this method may be deoptimized. - // Currently we don't have meta data support for deoptimizing - // a method that eliminates allocations/stores. - bool MayDeoptimize() const { - return may_deoptimize_; - } - // Find and return the heap location index in heap_locations_. size_t FindHeapLocationIndex(ReferenceInfo* ref_info, size_t offset, @@ -493,10 +506,6 @@ class HeapLocationCollector : public HGraphVisitor { CreateReferenceInfoForReferenceType(instruction); } - void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) OVERRIDE { - may_deoptimize_ = true; - } - void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE { has_monitor_operations_ = true; } @@ -508,7 +517,6 @@ class HeapLocationCollector : public HGraphVisitor { // alias analysis and won't be as effective. bool has_volatile_; // If there are volatile field accesses. bool has_monitor_operations_; // If there are monitor operations. - bool may_deoptimize_; // Only true for HDeoptimize with single-frame deoptimization. 
DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector); }; @@ -671,7 +679,7 @@ class LSEVisitor : public HGraphVisitor { bool from_all_predecessors = true; ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo(); HInstruction* singleton_ref = nullptr; - if (ref_info->IsSingletonAndNotReturned()) { + if (ref_info->IsSingletonAndNonEscaping()) { // We do more analysis of liveness when merging heap values for such // cases since stores into such references may potentially be eliminated. singleton_ref = ref_info->GetReference(); @@ -844,8 +852,7 @@ class LSEVisitor : public HGraphVisitor { } else if (index != nullptr) { // For array element, don't eliminate stores since it can be easily aliased // with non-constant index. - } else if (!heap_location_collector_.MayDeoptimize() && - ref_info->IsSingletonAndNotReturned()) { + } else if (ref_info->IsSingletonAndNonEscaping()) { // Store into a field of a singleton that's not returned. The value cannot be // killed due to aliasing/invocation. It can be redundant since future loads can // directly get the value set by this instruction. The value can still be killed due to @@ -1019,8 +1026,7 @@ class LSEVisitor : public HGraphVisitor { // new_instance isn't used for field accesses. No need to process it. 
return; } - if (!heap_location_collector_.MayDeoptimize() && - ref_info->IsSingletonAndNotReturned() && + if (ref_info->IsSingletonAndNonEscaping() && !new_instance->IsFinalizable() && !new_instance->NeedsAccessCheck()) { singleton_new_instances_.push_back(new_instance); diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h index 52747c0cc4..091b58a63d 100644 --- a/compiler/optimizing/locations.h +++ b/compiler/optimizing/locations.h @@ -525,6 +525,12 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> { temps_.push_back(location); } + void AddRegisterTemps(size_t count) { + for (size_t i = 0; i < count; ++i) { + AddTemp(Location::RequiresRegister()); + } + } + Location GetTemp(uint32_t at) const { return temps_[at]; } diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 45c7eb1a46..91553226c6 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -1432,10 +1432,10 @@ HBasicBlock* HBasicBlock::SplitBefore(HInstruction* cursor) { AddInstruction(new (GetGraph()->GetArena()) HGoto(new_block->GetDexPc())); for (HBasicBlock* successor : GetSuccessors()) { - new_block->successors_.push_back(successor); successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block; } - successors_.clear(); + new_block->successors_.swap(successors_); + DCHECK(successors_.empty()); AddSuccessor(new_block); GetGraph()->AddBlock(new_block); @@ -1449,10 +1449,10 @@ HBasicBlock* HBasicBlock::CreateImmediateDominator() { HBasicBlock* new_block = new (GetGraph()->GetArena()) HBasicBlock(GetGraph(), GetDexPc()); for (HBasicBlock* predecessor : GetPredecessors()) { - new_block->predecessors_.push_back(predecessor); predecessor->successors_[predecessor->GetSuccessorIndexOf(this)] = new_block; } - predecessors_.clear(); + new_block->predecessors_.swap(predecessors_); + DCHECK(predecessors_.empty()); AddPredecessor(new_block); GetGraph()->AddBlock(new_block); @@ -1477,16 +1477,16 @@ 
HBasicBlock* HBasicBlock::SplitBeforeForInlining(HInstruction* cursor) { new_block->instructions_.SetBlockOfInstructions(new_block); for (HBasicBlock* successor : GetSuccessors()) { - new_block->successors_.push_back(successor); successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block; } - successors_.clear(); + new_block->successors_.swap(successors_); + DCHECK(successors_.empty()); for (HBasicBlock* dominated : GetDominatedBlocks()) { dominated->dominator_ = new_block; - new_block->dominated_blocks_.push_back(dominated); } - dominated_blocks_.clear(); + new_block->dominated_blocks_.swap(dominated_blocks_); + DCHECK(dominated_blocks_.empty()); return new_block; } @@ -1504,16 +1504,16 @@ HBasicBlock* HBasicBlock::SplitAfterForInlining(HInstruction* cursor) { new_block->instructions_.SetBlockOfInstructions(new_block); for (HBasicBlock* successor : GetSuccessors()) { - new_block->successors_.push_back(successor); successor->predecessors_[successor->GetPredecessorIndexOf(this)] = new_block; } - successors_.clear(); + new_block->successors_.swap(successors_); + DCHECK(successors_.empty()); for (HBasicBlock* dominated : GetDominatedBlocks()) { dominated->dominator_ = new_block; - new_block->dominated_blocks_.push_back(dominated); } - dominated_blocks_.clear(); + new_block->dominated_blocks_.swap(dominated_blocks_); + DCHECK(dominated_blocks_.empty()); return new_block; } @@ -1852,17 +1852,19 @@ void HBasicBlock::MergeWith(HBasicBlock* other) { // Update links to the successors of `other`. successors_.clear(); - while (!other->successors_.empty()) { - HBasicBlock* successor = other->GetSuccessors()[0]; - successor->ReplacePredecessor(other, this); + for (HBasicBlock* successor : other->GetSuccessors()) { + successor->predecessors_[successor->GetPredecessorIndexOf(other)] = this; } + successors_.swap(other->successors_); + DCHECK(other->successors_.empty()); // Update the dominator tree. 
RemoveDominatedBlock(other); for (HBasicBlock* dominated : other->GetDominatedBlocks()) { - dominated_blocks_.push_back(dominated); dominated->SetDominator(this); } + dominated_blocks_.insert( + dominated_blocks_.end(), other->dominated_blocks_.begin(), other->dominated_blocks_.end()); other->dominated_blocks_.clear(); other->dominator_ = nullptr; @@ -1889,16 +1891,18 @@ void HBasicBlock::MergeWithInlined(HBasicBlock* other) { // Update links to the successors of `other`. successors_.clear(); - while (!other->successors_.empty()) { - HBasicBlock* successor = other->GetSuccessors()[0]; - successor->ReplacePredecessor(other, this); + for (HBasicBlock* successor : other->GetSuccessors()) { + successor->predecessors_[successor->GetPredecessorIndexOf(other)] = this; } + successors_.swap(other->successors_); + DCHECK(other->successors_.empty()); // Update the dominator tree. for (HBasicBlock* dominated : other->GetDominatedBlocks()) { - dominated_blocks_.push_back(dominated); dominated->SetDominator(this); } + dominated_blocks_.insert( + dominated_blocks_.end(), other->dominated_blocks_.begin(), other->dominated_blocks_.end()); other->dominated_blocks_.clear(); other->dominator_ = nullptr; other->graph_ = nullptr; @@ -2521,8 +2525,6 @@ std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs) { return os << "BootImageLinkTimePcRelative"; case HLoadString::LoadKind::kBootImageAddress: return os << "BootImageAddress"; - case HLoadString::LoadKind::kDexCacheAddress: - return os << "DexCacheAddress"; case HLoadString::LoadKind::kBssEntry: return os << "BssEntry"; case HLoadString::LoadKind::kDexCacheViaMethod: diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index ce2edde1c1..883ac65c9c 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -1956,7 +1956,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { bool IsRemovable() const { return - !HasSideEffects() && + !DoesAnyWrite() && !CanThrow() && 
!IsSuspendCheck() && !IsControlFlow() && @@ -3782,6 +3782,8 @@ class HInvoke : public HInstruction { return GetEnvironment()->IsFromInlinedInvoke(); } + void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); } + bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); } bool CanBeMoved() const OVERRIDE { return IsIntrinsic(); } @@ -3840,8 +3842,6 @@ class HInvoke : public HInstruction { SetPackedFlag<kFlagCanThrow>(true); } - void SetCanThrow(bool can_throw) { SetPackedFlag<kFlagCanThrow>(can_throw); } - uint32_t number_of_arguments_; ArtMethod* const resolved_method_; ArenaVector<HUserRecord<HInstruction*>> inputs_; @@ -5679,10 +5679,6 @@ class HLoadString FINAL : public HInstruction { // GetIncludePatchInformation(). kBootImageAddress, - // Load from the resolved strings array at an absolute address. - // Used for strings outside the boot image referenced by JIT-compiled code. - kDexCacheAddress, - // Load from an entry in the .bss section using a PC-relative load. // Used for strings outside boot image when .bss is accessible with a PC-relative load. 
kBssEntry, @@ -5807,7 +5803,7 @@ class HLoadString FINAL : public HInstruction { } static bool HasAddress(LoadKind load_kind) { - return load_kind == LoadKind::kBootImageAddress || load_kind == LoadKind::kDexCacheAddress; + return load_kind == LoadKind::kBootImageAddress; } void SetLoadKindInternal(LoadKind load_kind); diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index a4847601f5..6f84cdcc4f 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -758,7 +758,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph, graph, stats, "dead_code_elimination$after_inlining"); HDeadCodeElimination* dce3 = new (arena) HDeadCodeElimination( graph, stats, "dead_code_elimination$final"); - HConstantFolding* fold1 = new (arena) HConstantFolding(graph); + HConstantFolding* fold1 = new (arena) HConstantFolding(graph, "constant_folding"); InstructionSimplifier* simplify1 = new (arena) InstructionSimplifier(graph, stats); HSelectGenerator* select_generator = new (arena) HSelectGenerator(graph, stats); HConstantFolding* fold2 = new (arena) HConstantFolding( diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index fd1db592bb..63e4ca674e 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -325,7 +325,6 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { load_string->SetLoadKindWithStringReference(load_kind, dex_file, string_index); break; case HLoadString::LoadKind::kBootImageAddress: - case HLoadString::LoadKind::kDexCacheAddress: DCHECK_NE(address, 0u); load_string->SetLoadKindWithAddress(load_kind, address); break; diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc index 58dd047081..fa32178796 100644 --- a/dex2oat/dex2oat_test.cc +++ b/dex2oat/dex2oat_test.cc @@ -438,9 +438,7 @@ class Dex2oatVeryLargeTest : public Dex2oatTest { Copy(GetDexSrc1(), dex_location); - 
std::vector<std::string> copy(extra_args); - - GenerateOdexForTest(dex_location, odex_location, filter, copy); + GenerateOdexForTest(dex_location, odex_location, filter, extra_args); CheckValidity(); ASSERT_TRUE(success_); diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc index 2b30a1be08..aa806557c2 100644 --- a/dexlayout/dexlayout.cc +++ b/dexlayout/dexlayout.cc @@ -1527,7 +1527,7 @@ static void ProcessDexFile(const char* file_name, const DexFile* dex_file, size_ // Output dex file. if (options_.output_dex_directory_ != nullptr) { std::string output_location(options_.output_dex_directory_); - size_t last_slash = dex_file->GetLocation().rfind("/"); + size_t last_slash = dex_file->GetLocation().rfind('/'); output_location.append(dex_file->GetLocation().substr(last_slash)); DexWriter::OutputDexFile(*header, output_location.c_str()); } diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc index 89544d7ef4..c7f36be905 100644 --- a/dexlayout/dexlayout_test.cc +++ b/dexlayout/dexlayout_test.cc @@ -37,12 +37,12 @@ class DexLayoutTest : public CommonRuntimeTest { bool FullPlainOutputExec(std::string* error_msg) { // TODO: dexdump2 -> dexdump ? ScratchFile dexdump_output; - std::string dexdump_filename = dexdump_output.GetFilename(); + const std::string& dexdump_filename = dexdump_output.GetFilename(); std::string dexdump = GetTestAndroidRoot() + "/bin/dexdump2"; EXPECT_TRUE(OS::FileExists(dexdump.c_str())) << dexdump << " should be a valid file path"; ScratchFile dexlayout_output; - std::string dexlayout_filename = dexlayout_output.GetFilename(); + const std::string& dexlayout_filename = dexlayout_output.GetFilename(); std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout"; EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path"; @@ -70,8 +70,8 @@ class DexLayoutTest : public CommonRuntimeTest { // Runs DexFileOutput test. 
bool DexFileOutputExec(std::string* error_msg) { ScratchFile tmp_file; - std::string tmp_name = tmp_file.GetFilename(); - size_t tmp_last_slash = tmp_name.rfind("/"); + const std::string& tmp_name = tmp_file.GetFilename(); + size_t tmp_last_slash = tmp_name.rfind('/'); std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1); std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout"; EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path"; @@ -84,7 +84,7 @@ class DexLayoutTest : public CommonRuntimeTest { return false; } - size_t dex_file_last_slash = dex_file.rfind("/"); + size_t dex_file_last_slash = dex_file.rfind('/'); std::string dex_file_name = dex_file.substr(dex_file_last_slash + 1); std::vector<std::string> unzip_exec_argv = { "/usr/bin/unzip", dex_file, "classes.dex", "-d", tmp_dir}; diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc index d1d127d980..a374686dc5 100644 --- a/imgdiag/imgdiag.cc +++ b/imgdiag/imgdiag.cc @@ -89,7 +89,7 @@ class ImgDiagDumper { // Return suffix of the file path after the last /. (e.g. 
/foo/bar -> bar, bar -> bar) static std::string BaseName(const std::string& str) { - size_t idx = str.rfind("/"); + size_t idx = str.rfind('/'); if (idx == std::string::npos) { return str; } diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index 7ea5beab37..3c8c1a397c 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -1068,7 +1068,7 @@ static int patchoat_image(TimingLogger& timings, TimingLogger::ScopedTiming pt("patch image and oat", &timings); std::string output_directory = - output_image_filename.substr(0, output_image_filename.find_last_of("/")); + output_image_filename.substr(0, output_image_filename.find_last_of('/')); bool ret = PatchOat::Patch(input_image_location, base_delta, output_directory, isa, &timings); if (kIsDebugBuild) { diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index cb8edffb94..01b3f349d4 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -30,8 +30,7 @@ namespace art { // Cast entrypoints. -extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); +extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class); // Read barrier entrypoints. 
// art_quick_read_barrier_mark_regX uses an non-standard calling @@ -73,7 +72,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; - qpoints->pCheckCast = art_quick_check_cast; + qpoints->pCheckInstanceOf = art_quick_check_instance_of; // Math qpoints->pIdivmod = __aeabi_idivmod; diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index b76050ce18..3a83eaf9c1 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -764,11 +764,12 @@ ENTRY art_quick_unlock_object_no_inline END art_quick_unlock_object_no_inline /* - * Entry from managed code that calls artIsAssignableFromCode and on failure calls - * artThrowClassCastException. + * Entry from managed code that calls artInstanceOfFromCode and on failure calls + * artThrowClassCastExceptionForObject. */ - .extern artThrowClassCastException -ENTRY art_quick_check_cast + .extern artInstanceOfFromCode + .extern artThrowClassCastExceptionForObject +ENTRY art_quick_check_instance_of push {r0-r1, lr} @ save arguments, link register and pad .cfi_adjust_cfa_offset 12 .cfi_rel_offset r0, 0 @@ -776,7 +777,7 @@ ENTRY art_quick_check_cast .cfi_rel_offset lr, 8 sub sp, #4 .cfi_adjust_cfa_offset 4 - bl artIsAssignableFromCode + bl artInstanceOfFromCode cbz r0, .Lthrow_class_cast_exception add sp, #4 .cfi_adjust_cfa_offset -4 @@ -792,9 +793,9 @@ ENTRY art_quick_check_cast .cfi_restore lr SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context mov r2, r9 @ pass Thread::Current - bl artThrowClassCastException @ (Class*, Class*, Thread*) + bl artThrowClassCastExceptionForObject @ (Object*, Class*, Thread*) bkpt -END art_quick_check_cast +END art_quick_check_instance_of // Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude. 
.macro POP_REG_NE rReg, offset, rExclude diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc index c2078f02c1..3c77672aac 100644 --- a/runtime/arch/arm64/entrypoints_init_arm64.cc +++ b/runtime/arch/arm64/entrypoints_init_arm64.cc @@ -30,8 +30,7 @@ namespace art { // Cast entrypoints. -extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); +extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class); // Read barrier entrypoints. // art_quick_read_barrier_mark_regX uses an non-standard calling @@ -76,7 +75,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; - qpoints->pCheckCast = art_quick_check_cast; + qpoints->pCheckInstanceOf = art_quick_check_instance_of; // Math // TODO null entrypoints not needed for ARM64 - generate inline. diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 74643f70a0..73bca037b8 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -1294,18 +1294,19 @@ ENTRY art_quick_unlock_object_no_inline END art_quick_unlock_object_no_inline /* - * Entry from managed code that calls artIsAssignableFromCode and on failure calls - * artThrowClassCastException. + * Entry from managed code that calls artInstanceOfFromCode and on failure calls + * artThrowClassCastExceptionForObject. */ - .extern artThrowClassCastException -ENTRY art_quick_check_cast + .extern artInstanceOfFromCode + .extern artThrowClassCastExceptionForObject +ENTRY art_quick_check_instance_of // Store arguments and link register // Stack needs to be 16B aligned on calls. 
SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32 SAVE_REG xLR, 24 // Call runtime code - bl artIsAssignableFromCode + bl artInstanceOfFromCode // Check for exception cbz x0, .Lthrow_class_cast_exception @@ -1324,9 +1325,9 @@ ENTRY art_quick_check_cast SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context mov x2, xSELF // pass Thread::Current - bl artThrowClassCastException // (Class*, Class*, Thread*) + bl artThrowClassCastExceptionForObject // (Object*, Class*, Thread*) brk 0 // We should not return here... -END art_quick_check_cast +END art_quick_check_instance_of // Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude. .macro POP_REG_NE xReg, offset, xExclude diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc index e10d4e6a74..e3230f65dd 100644 --- a/runtime/arch/mips/entrypoints_init_mips.cc +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -30,8 +30,7 @@ namespace art { // Cast entrypoints. -extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); +extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class); // Math entrypoints. 
extern int32_t CmpgDouble(double a, double b); @@ -73,8 +72,8 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct."); - qpoints->pCheckCast = art_quick_check_cast; - static_assert(!IsDirectEntrypoint(kQuickCheckCast), "Non-direct C stub marked direct."); + qpoints->pCheckInstanceOf = art_quick_check_instance_of; + static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct."); // DexCache qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index c3c188233b..34e34b40ff 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -1171,10 +1171,11 @@ ENTRY art_quick_unlock_object_no_inline END art_quick_unlock_object_no_inline /* - * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. + * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure. 
*/ - .extern artThrowClassCastException -ENTRY art_quick_check_cast + .extern artInstanceOfFromCode + .extern artThrowClassCastExceptionForObject +ENTRY art_quick_check_instance_of addiu $sp, $sp, -32 .cfi_adjust_cfa_offset 32 sw $gp, 16($sp) @@ -1183,7 +1184,7 @@ ENTRY art_quick_check_cast sw $t9, 8($sp) sw $a1, 4($sp) sw $a0, 0($sp) - la $t9, artIsAssignableFromCode + la $t9, artInstanceOfFromCode jalr $t9 addiu $sp, $sp, -16 # reserve argument slots on the stack addiu $sp, $sp, 16 @@ -1200,10 +1201,10 @@ ENTRY art_quick_check_cast addiu $sp, $sp, 32 .cfi_adjust_cfa_offset -32 SETUP_SAVE_ALL_CALLEE_SAVES_FRAME - la $t9, artThrowClassCastException - jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*) + la $t9, artThrowClassCastExceptionForObject + jalr $zero, $t9 # artThrowClassCastExceptionForObject (Object*, Class*, Thread*) move $a2, rSELF # pass Thread::Current -END art_quick_check_cast +END art_quick_check_instance_of /* * Restore rReg's value from offset($sp) if rReg is not the same as rExclude. diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc index a0379053bc..43b73f127a 100644 --- a/runtime/arch/mips64/entrypoints_init_mips64.cc +++ b/runtime/arch/mips64/entrypoints_init_mips64.cc @@ -30,8 +30,8 @@ namespace art { // Cast entrypoints. -extern "C" size_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); +extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class); + // Math entrypoints.
extern int32_t CmpgDouble(double a, double b); extern int32_t CmplDouble(double a, double b); @@ -64,7 +64,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; - qpoints->pCheckCast = art_quick_check_cast; + qpoints->pCheckInstanceOf = art_quick_check_instance_of; // Math qpoints->pCmpgDouble = CmpgDouble; diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index cb2d1c816b..0861d2d73e 100644 --- a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -1256,10 +1256,11 @@ ENTRY_NO_GP art_quick_unlock_object_no_inline END art_quick_unlock_object_no_inline /* - * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. + * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure. */ - .extern artThrowClassCastException -ENTRY art_quick_check_cast + .extern artInstanceOfFromCode + .extern artThrowClassCastExceptionForObject +ENTRY art_quick_check_instance_of daddiu $sp, $sp, -32 .cfi_adjust_cfa_offset 32 sd $ra, 24($sp) @@ -1267,7 +1268,7 @@ ENTRY art_quick_check_cast sd $t9, 16($sp) sd $a1, 8($sp) sd $a0, 0($sp) - jal artIsAssignableFromCode + jal artInstanceOfFromCode .cpreturn # Restore gp from t8 in branch delay slot. # t8 may be clobbered in artIsAssignableFromCode. 
beq $v0, $zero, .Lthrow_class_cast_exception @@ -1283,10 +1284,10 @@ ENTRY art_quick_check_cast .cfi_adjust_cfa_offset -32 SETUP_GP SETUP_SAVE_ALL_CALLEE_SAVES_FRAME - dla $t9, artThrowClassCastException - jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*) + dla $t9, artThrowClassCastExceptionForObject + jalr $zero, $t9 # artThrowClassCastExceptionForObject (Object*, Class*, Thread*) move $a2, rSELF # pass Thread::Current -END art_quick_check_cast +END art_quick_check_instance_of /* diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index c151f00289..bbf9a8b93c 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -23,6 +23,7 @@ #include "common_runtime_test.h" #include "entrypoints/quick/quick_entrypoints_enum.h" #include "imt_conflict_table.h" +#include "jni_internal.h" #include "linear_alloc.h" #include "mirror/class-inl.h" #include "mirror/string-inl.h" @@ -805,7 +806,7 @@ TEST_F(StubTest, UnlockObject) { #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \ (defined(__x86_64__) && !defined(__APPLE__)) -extern "C" void art_quick_check_cast(void); +extern "C" void art_quick_check_instance_of(void); #endif TEST_F(StubTest, CheckCast) { @@ -813,65 +814,89 @@ (defined(__x86_64__) && !defined(__APPLE__)) Thread* self = Thread::Current(); - const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast); + const uintptr_t art_quick_check_instance_of = + StubTest::GetEntrypoint(self, kQuickCheckInstanceOf); // Find some classes.
ScopedObjectAccess soa(self); // garbage is created during ClassLinker::Init - StackHandleScope<4> hs(soa.Self()); - Handle<mirror::Class> c( - hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;"))); - Handle<mirror::Class> c2( - hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;"))); - Handle<mirror::Class> list( - hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/util/List;"))); - Handle<mirror::Class> array_list( - hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/util/ArrayList;"))); + VariableSizedHandleScope hs(soa.Self()); + Handle<mirror::Class> klass_obj( + hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;"))); + Handle<mirror::Class> klass_str( + hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;"))); + Handle<mirror::Class> klass_list( + hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/List;"))); + Handle<mirror::Class> klass_cloneable( + hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;"))); + Handle<mirror::Class> klass_array_list( + hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/ArrayList;"))); + Handle<mirror::Object> obj(hs.NewHandle(klass_obj->AllocObject(soa.Self()))); + Handle<mirror::String> string(hs.NewHandle( + mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABCD"))); + Handle<mirror::Object> array_list(hs.NewHandle(klass_array_list->AllocObject(soa.Self()))); EXPECT_FALSE(self->IsExceptionPending()); - Invoke3(reinterpret_cast<size_t>(c.Get()), - reinterpret_cast<size_t>(c.Get()), + Invoke3(reinterpret_cast<size_t>(obj.Get()), + reinterpret_cast<size_t>(klass_obj.Get()), 0U, - art_quick_check_cast, + art_quick_check_instance_of, self); EXPECT_FALSE(self->IsExceptionPending()); - Invoke3(reinterpret_cast<size_t>(c2.Get()), - reinterpret_cast<size_t>(c2.Get()), + // Expected true: Test string instance of java.lang.String. 
+ Invoke3(reinterpret_cast<size_t>(string.Get()), + reinterpret_cast<size_t>(klass_str.Get()), 0U, - art_quick_check_cast, + art_quick_check_instance_of, self); EXPECT_FALSE(self->IsExceptionPending()); - Invoke3(reinterpret_cast<size_t>(c.Get()), - reinterpret_cast<size_t>(c2.Get()), + // Expected true: Test string instance of java.lang.Object. + Invoke3(reinterpret_cast<size_t>(string.Get()), + reinterpret_cast<size_t>(klass_obj.Get()), + 0U, + art_quick_check_instance_of, + self); + EXPECT_FALSE(self->IsExceptionPending()); + + // Expected false: Test object instance of java.lang.String. + Invoke3(reinterpret_cast<size_t>(obj.Get()), + reinterpret_cast<size_t>(klass_str.Get()), + 0U, + art_quick_check_instance_of, + self); + EXPECT_TRUE(self->IsExceptionPending()); + self->ClearException(); + + Invoke3(reinterpret_cast<size_t>(array_list.Get()), + reinterpret_cast<size_t>(klass_list.Get()), 0U, - art_quick_check_cast, + art_quick_check_instance_of, self); EXPECT_FALSE(self->IsExceptionPending()); - Invoke3(reinterpret_cast<size_t>(list.Get()), - reinterpret_cast<size_t>(array_list.Get()), + Invoke3(reinterpret_cast<size_t>(array_list.Get()), + reinterpret_cast<size_t>(klass_cloneable.Get()), 0U, - art_quick_check_cast, + art_quick_check_instance_of, self); EXPECT_FALSE(self->IsExceptionPending()); - Invoke3(reinterpret_cast<size_t>(list.Get()), - reinterpret_cast<size_t>(c2.Get()), + Invoke3(reinterpret_cast<size_t>(string.Get()), + reinterpret_cast<size_t>(klass_array_list.Get()), 0U, - art_quick_check_cast, + art_quick_check_instance_of, self); EXPECT_TRUE(self->IsExceptionPending()); self->ClearException(); - // TODO: Make the following work. But that would require correct managed frames. 
- Invoke3(reinterpret_cast<size_t>(c2.Get()), - reinterpret_cast<size_t>(c.Get()), + Invoke3(reinterpret_cast<size_t>(string.Get()), + reinterpret_cast<size_t>(klass_cloneable.Get()), 0U, - art_quick_check_cast, + art_quick_check_instance_of, self); EXPECT_TRUE(self->IsExceptionPending()); self->ClearException(); @@ -1990,7 +2015,7 @@ TEST_F(StubTest, DISABLED_IMT) { ASSERT_NE(nullptr, add_jmethod); // Get representation. - ArtMethod* contains_amethod = soa.DecodeMethod(contains_jmethod); + ArtMethod* contains_amethod = jni::DecodeArtMethod(contains_jmethod); // Patch up ArrayList.contains. if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) { @@ -2008,7 +2033,7 @@ TEST_F(StubTest, DISABLED_IMT) { ASSERT_NE(nullptr, inf_contains_jmethod); // Get mirror representation. - ArtMethod* inf_contains = soa.DecodeMethod(inf_contains_jmethod); + ArtMethod* inf_contains = jni::DecodeArtMethod(inf_contains_jmethod); // Object diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc index 0a10a3cceb..877df8f7b0 100644 --- a/runtime/arch/x86/entrypoints_init_x86.cc +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -27,8 +27,7 @@ namespace art { // Cast entrypoints. -extern "C" size_t art_quick_is_assignable(const mirror::Class* klass, - const mirror::Class* ref_class); +extern "C" size_t art_quick_is_assignable(mirror::Class* klass, mirror::Class* ref_class); // Read barrier entrypoints. // art_quick_read_barrier_mark_regX uses an non-standard calling @@ -50,7 +49,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Cast qpoints->pInstanceofNonTrivial = art_quick_is_assignable; - qpoints->pCheckCast = art_quick_check_cast; + qpoints->pCheckInstanceOf = art_quick_check_instance_of; // More math. 
qpoints->pCos = cos; diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 0f1efcee69..761a510bfe 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -1361,11 +1361,11 @@ DEFINE_FUNCTION art_quick_is_assignable ret END_FUNCTION art_quick_is_assignable -DEFINE_FUNCTION art_quick_check_cast +DEFINE_FUNCTION art_quick_check_instance_of PUSH eax // alignment padding - PUSH ecx // pass arg2 - obj->klass - PUSH eax // pass arg1 - checked class - call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass) + PUSH ecx // pass arg2 - checked class + PUSH eax // pass arg1 - obj + call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass) testl %eax, %eax jz 1f // jump forward if not assignable addl LITERAL(12), %esp // pop arguments @@ -1385,9 +1385,9 @@ DEFINE_FUNCTION art_quick_check_cast CFI_ADJUST_CFA_OFFSET(4) PUSH ecx // pass arg2 PUSH eax // pass arg1 - call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*) + call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*) UNREACHABLE -END_FUNCTION art_quick_check_cast +END_FUNCTION art_quick_check_instance_of // Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack. MACRO2(POP_REG_NE, reg, exclude_reg) diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc index 8c425d53d3..59c9dfeb6f 100644 --- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc +++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc @@ -30,8 +30,7 @@ namespace art { // Cast entrypoints. -extern "C" size_t art_quick_assignable_from_code(const mirror::Class* klass, - const mirror::Class* ref_class); +extern "C" size_t art_quick_assignable_from_code(mirror::Class* klass, mirror::Class* ref_class); // Read barrier entrypoints. 
// art_quick_read_barrier_mark_regX uses an non-standard calling @@ -65,7 +64,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { // Cast qpoints->pInstanceofNonTrivial = art_quick_assignable_from_code; - qpoints->pCheckCast = art_quick_check_cast; + qpoints->pCheckInstanceOf = art_quick_check_instance_of; // More math. qpoints->pCos = cos; diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 0d0ab93b8a..20ee3f5b3a 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -1480,14 +1480,14 @@ DEFINE_FUNCTION art_quick_unlock_object_no_inline RETURN_IF_EAX_ZERO END_FUNCTION art_quick_unlock_object_no_inline -DEFINE_FUNCTION art_quick_check_cast +DEFINE_FUNCTION art_quick_check_instance_of // We could check the super classes here but that is usually already checked in the caller. PUSH rdi // Save args for exc PUSH rsi subq LITERAL(8), %rsp // Alignment padding. CFI_ADJUST_CFA_OFFSET(8) SETUP_FP_CALLEE_SAVE_FRAME - call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass) + call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass) testq %rax, %rax jz 1f // jump forward if not assignable RESTORE_FP_CALLEE_SAVE_FRAME @@ -1506,9 +1506,9 @@ DEFINE_FUNCTION art_quick_check_cast POP rdi SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current() - call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*) + call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*) UNREACHABLE -END_FUNCTION art_quick_check_cast +END_FUNCTION art_quick_check_instance_of // Restore reg's value if reg is not the same as exclude_reg, otherwise just adjust stack. 
diff --git a/runtime/base/variant_map_test.cc b/runtime/base/variant_map_test.cc index ccb22eb64d..93336e0ac3 100644 --- a/runtime/base/variant_map_test.cc +++ b/runtime/base/variant_map_test.cc @@ -107,8 +107,8 @@ TEST(VariantMaps, RuleOfFive) { fmFilled.Set(FruitMap::Orange, 555.0); EXPECT_EQ(size_t(2), fmFilled.Size()); - // Test copy constructor - FruitMap fmEmptyCopy(fmEmpty); + // Test copy constructor (NOLINT as a reference is suggested, instead) + FruitMap fmEmptyCopy(fmEmpty); // NOLINT EXPECT_EQ(size_t(0), fmEmptyCopy.Size()); // Test copy constructor diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc index 5399dc5206..6c27bc61e4 100644 --- a/runtime/check_jni.cc +++ b/runtime/check_jni.cc @@ -282,7 +282,7 @@ class ScopedCheck { return false; } - ArtField* f = CheckFieldID(soa, fid); + ArtField* f = CheckFieldID(fid); if (f == nullptr) { return false; } @@ -313,7 +313,7 @@ class ScopedCheck { bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc, jmethodID mid, Primitive::Type type, InvokeType invoke) REQUIRES_SHARED(Locks::mutator_lock_) { - ArtMethod* m = CheckMethodID(soa, mid); + ArtMethod* m = CheckMethodID(mid); if (m == nullptr) { return false; } @@ -362,7 +362,7 @@ class ScopedCheck { bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid) REQUIRES_SHARED(Locks::mutator_lock_) { ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(java_class); - ArtField* f = CheckFieldID(soa, fid); + ArtField* f = CheckFieldID(fid); if (f == nullptr) { return false; } @@ -385,7 +385,7 @@ class ScopedCheck { */ bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid) REQUIRES_SHARED(Locks::mutator_lock_) { - ArtMethod* m = CheckMethodID(soa, mid); + ArtMethod* m = CheckMethodID(mid); if (m == nullptr) { return false; } @@ -407,7 +407,7 @@ class ScopedCheck { */ bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid) REQUIRES_SHARED(Locks::mutator_lock_) { - 
ArtMethod* m = CheckMethodID(soa, mid); + ArtMethod* m = CheckMethodID(mid); if (m == nullptr) { return false; } @@ -577,9 +577,8 @@ class ScopedCheck { return true; } - bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid) - REQUIRES_SHARED(Locks::mutator_lock_) { - ArtMethod* method = soa.DecodeMethod(mid); + bool CheckConstructor(jmethodID mid) REQUIRES_SHARED(Locks::mutator_lock_) { + ArtMethod* method = jni::DecodeArtMethod(mid); if (method == nullptr) { AbortF("expected non-null constructor"); return false; @@ -682,7 +681,7 @@ class ScopedCheck { if (!is_static && !CheckInstanceFieldID(soa, obj, fid)) { return false; } - ArtField* field = soa.DecodeField(fid); + ArtField* field = jni::DecodeArtField(fid); DCHECK(field != nullptr); // Already checked by Check. if (is_static != field->IsStatic()) { AbortF("attempt to access %s field %s: %p", @@ -844,9 +843,9 @@ class ScopedCheck { case 'c': // jclass return CheckInstance(soa, kClass, arg.c, false); case 'f': // jfieldID - return CheckFieldID(soa, arg.f) != nullptr; + return CheckFieldID(arg.f) != nullptr; case 'm': // jmethodID - return CheckMethodID(soa, arg.m) != nullptr; + return CheckMethodID(arg.m) != nullptr; case 'r': // release int return CheckReleaseMode(arg.r); case 's': // jstring @@ -868,7 +867,7 @@ class ScopedCheck { REQUIRES_SHARED(Locks::mutator_lock_) { CHECK(args_p != nullptr); VarArgs args(args_p->Clone()); - ArtMethod* m = CheckMethodID(soa, args.GetMethodID()); + ArtMethod* m = CheckMethodID(args.GetMethodID()); if (m == nullptr) { return false; } @@ -962,7 +961,7 @@ class ScopedCheck { } case 'f': { // jfieldID jfieldID fid = arg.f; - ArtField* f = soa.DecodeField(fid); + ArtField* f = jni::DecodeArtField(fid); *msg += ArtField::PrettyField(f); if (!entry) { StringAppendF(msg, " (%p)", fid); @@ -971,7 +970,7 @@ class ScopedCheck { } case 'm': { // jmethodID jmethodID mid = arg.m; - ArtMethod* m = soa.DecodeMethod(mid); + ArtMethod* m = jni::DecodeArtMethod(mid); *msg += 
ArtMethod::PrettyMethod(m); if (!entry) { StringAppendF(msg, " (%p)", mid); @@ -981,7 +980,7 @@ class ScopedCheck { case '.': { const VarArgs* va = arg.va; VarArgs args(va->Clone()); - ArtMethod* m = soa.DecodeMethod(args.GetMethodID()); + ArtMethod* m = jni::DecodeArtMethod(args.GetMethodID()); uint32_t len; const char* shorty = m->GetShorty(&len); CHECK_GE(len, 1u); @@ -1147,13 +1146,12 @@ class ScopedCheck { return true; } - ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid) - REQUIRES_SHARED(Locks::mutator_lock_) { + ArtField* CheckFieldID(jfieldID fid) REQUIRES_SHARED(Locks::mutator_lock_) { if (fid == nullptr) { AbortF("jfieldID was NULL"); return nullptr; } - ArtField* f = soa.DecodeField(fid); + ArtField* f = jni::DecodeArtField(fid); // TODO: Better check here. if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f->GetDeclaringClass().Ptr())) { Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR)); @@ -1163,13 +1161,12 @@ class ScopedCheck { return f; } - ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid) - REQUIRES_SHARED(Locks::mutator_lock_) { + ArtMethod* CheckMethodID(jmethodID mid) REQUIRES_SHARED(Locks::mutator_lock_) { if (mid == nullptr) { AbortF("jmethodID was NULL"); return nullptr; } - ArtMethod* m = soa.DecodeMethod(mid); + ArtMethod* m = jni::DecodeArtMethod(mid); // TODO: Better check here. 
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m->GetDeclaringClass())) { Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR)); @@ -2005,7 +2002,7 @@ class CheckJNI { VarArgs rest(mid, vargs); JniValueType args[4] = {{.E = env}, {.c = c}, {.m = mid}, {.va = &rest}}; if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) && - sc.CheckConstructor(soa, mid)) { + sc.CheckConstructor(mid)) { JniValueType result; result.L = baseEnv(env)->NewObjectV(env, c, mid, vargs); if (sc.Check(soa, false, "L", &result)) { @@ -2029,7 +2026,7 @@ class CheckJNI { VarArgs rest(mid, vargs); JniValueType args[4] = {{.E = env}, {.c = c}, {.m = mid}, {.va = &rest}}; if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) && - sc.CheckConstructor(soa, mid)) { + sc.CheckConstructor(mid)) { JniValueType result; result.L = baseEnv(env)->NewObjectA(env, c, mid, vargs); if (sc.Check(soa, false, "L", &result)) { diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index e7e5be7b75..6d45dad28f 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -63,6 +63,7 @@ #include "jit/jit.h" #include "jit/jit_code_cache.h" #include "jit/offline_profiling_info.h" +#include "jni_internal.h" #include "leb128.h" #include "linear_alloc.h" #include "mirror/class.h" @@ -1124,13 +1125,12 @@ bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa, class_loader->GetClass(); } -static mirror::String* GetDexPathListElementName(ScopedObjectAccessUnchecked& soa, - ObjPtr<mirror::Object> element) +static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element) REQUIRES_SHARED(Locks::mutator_lock_) { ArtField* const dex_file_field = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); ArtField* const dex_file_name_field = - soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_fileName); 
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_fileName); DCHECK(dex_file_field != nullptr); DCHECK(dex_file_name_field != nullptr); DCHECK(element != nullptr); @@ -1154,9 +1154,9 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader, DCHECK(error_msg != nullptr); ScopedObjectAccessUnchecked soa(Thread::Current()); ArtField* const dex_path_list_field = - soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList); + jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList); ArtField* const dex_elements_field = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements); CHECK(dex_path_list_field != nullptr); CHECK(dex_elements_field != nullptr); while (!ClassLinker::IsBootClassLoader(soa, class_loader)) { @@ -1183,7 +1183,7 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader, *error_msg = StringPrintf("Null dex element at index %d", i); return false; } - ObjPtr<mirror::String> const name = GetDexPathListElementName(soa, element); + ObjPtr<mirror::String> const name = GetDexPathListElementName(element); if (name == nullptr) { *error_msg = StringPrintf("Null name for dex element at index %d", i); return false; @@ -1733,7 +1733,7 @@ bool ClassLinker::AddImageSpace( ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i); if (element != nullptr) { // If we are somewhere in the middle of the array, there may be nulls at the end. - loader_dex_file_names.push_back(GetDexPathListElementName(soa, element)); + loader_dex_file_names.push_back(GetDexPathListElementName(element)); } } // Ignore the number of image dex files since we are adding those to the class loader anyways. @@ -2425,16 +2425,17 @@ bool ClassLinker::FindClassInBaseDexClassLoader(ScopedObjectAccessAlreadyRunnabl // Handle as if this is the child PathClassLoader. 
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader. // We need to get the DexPathList and loop through it. - ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie); + ArtField* const cookie_field = + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie); ArtField* const dex_file_field = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); ObjPtr<mirror::Object> dex_path_list = - soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)-> - GetObject(class_loader.Get()); + jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)-> + GetObject(class_loader.Get()); if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) { // DexPathList has an array dexElements of Elements[] which each contain a dex file. ObjPtr<mirror::Object> dex_elements_obj = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)-> + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)-> GetObject(dex_path_list); // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look // at the mCookie which is a DexFile vector. 
@@ -8137,7 +8138,7 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, StackHandleScope<11> hs(self); ArtField* dex_elements_field = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements); Handle<mirror::Class> dex_elements_class(hs.NewHandle(dex_elements_field->GetType<true>())); DCHECK(dex_elements_class.Get() != nullptr); @@ -8150,13 +8151,13 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, hs.NewHandle(dex_elements_class->GetComponentType()); ArtField* element_file_field = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); DCHECK_EQ(h_dex_element_class.Get(), element_file_field->GetDeclaringClass()); - ArtField* cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie); + ArtField* cookie_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie); DCHECK_EQ(cookie_field->GetDeclaringClass(), element_file_field->GetType<false>()); - ArtField* file_name_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_fileName); + ArtField* file_name_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_fileName); DCHECK_EQ(file_name_field->GetDeclaringClass(), element_file_field->GetType<false>()); // Fill the elements array. @@ -8206,7 +8207,7 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, DCHECK(h_path_class_loader.Get() != nullptr); // Set DexPathList. 
ArtField* path_list_field = - soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList); + jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList); DCHECK(path_list_field != nullptr); path_list_field->SetObject<false>(h_path_class_loader.Get(), h_dex_path_list.Get()); diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc index 3409938c50..8226e6049e 100644 --- a/runtime/common_runtime_test.cc +++ b/runtime/common_runtime_test.cc @@ -520,17 +520,17 @@ std::vector<const DexFile*> CommonRuntimeTestImpl::GetDexFiles(jobject jclass_lo // The class loader is a PathClassLoader which inherits from BaseDexClassLoader. // We need to get the DexPathList and loop through it. - ArtField* cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie); + ArtField* cookie_field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie); ArtField* dex_file_field = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); ObjPtr<mirror::Object> dex_path_list = - soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)-> - GetObject(class_loader.Get()); + jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)-> + GetObject(class_loader.Get()); if (dex_path_list != nullptr && dex_file_field!= nullptr && cookie_field != nullptr) { // DexPathList has an array dexElements of Elements[] which each contain a dex file. ObjPtr<mirror::Object> dex_elements_obj = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)-> - GetObject(dex_path_list); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)-> + GetObject(dex_path_list); // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look // at the mCookie which is a DexFile vector. 
if (dex_elements_obj != nullptr) { diff --git a/runtime/debugger.cc b/runtime/debugger.cc index 1da888e4b7..dc2ae2e215 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -39,6 +39,7 @@ #include "handle_scope.h" #include "jdwp/jdwp_priv.h" #include "jdwp/object_registry.h" +#include "jni_internal.h" #include "jvalue-inl.h" #include "mirror/class.h" #include "mirror/class-inl.h" @@ -2007,7 +2008,7 @@ JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error); CHECK(thread_object != nullptr) << error; ArtField* java_lang_Thread_name_field = - soa.DecodeField(WellKnownClasses::java_lang_Thread_name); + jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name); ObjPtr<mirror::String> s(java_lang_Thread_name_field->GetObject(thread_object)->AsString()); if (s != nullptr) { *name = s->ToModifiedUtf8(); @@ -2032,7 +2033,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p } else if (error == JDWP::ERR_NONE) { ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread); CHECK(c != nullptr); - ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group); + ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group); CHECK(f != nullptr); ObjPtr<mirror::Object> group = f->GetObject(thread_object); CHECK(group != nullptr); @@ -2074,7 +2075,7 @@ JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::Ex return error; } ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupName"); - ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); + ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name); CHECK(f != nullptr); ObjPtr<mirror::String> s = f->GetObject(thread_group)->AsString(); @@ -2093,7 +2094,7 @@ JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP:: ObjPtr<mirror::Object> 
parent; { ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupParent"); - ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent); + ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_parent); CHECK(f != nullptr); parent = f->GetObject(thread_group); } @@ -2102,13 +2103,13 @@ JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP:: return JDWP::ERR_NONE; } -static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group, +static void GetChildThreadGroups(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* child_thread_group_ids) REQUIRES_SHARED(Locks::mutator_lock_) { CHECK(thread_group != nullptr); // Get the int "ngroups" count of this thread group... - ArtField* ngroups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_ngroups); + ArtField* ngroups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_ngroups); CHECK(ngroups_field != nullptr); const int32_t size = ngroups_field->GetInt(thread_group); if (size == 0) { @@ -2116,7 +2117,7 @@ static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Objec } // Get the ThreadGroup[] "groups" out of this thread group... - ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups); + ArtField* groups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_groups); ObjPtr<mirror::Object> groups_array = groups_field->GetObject(thread_group); CHECK(groups_array != nullptr); @@ -2154,7 +2155,7 @@ JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id, // Add child thread groups. 
{ std::vector<JDWP::ObjectId> child_thread_groups_ids; - GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids); + GetChildThreadGroups(thread_group, &child_thread_groups_ids); expandBufAdd4BE(pReply, child_thread_groups_ids.size()); for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) { expandBufAddObjectId(pReply, child_thread_group_id); @@ -2166,7 +2167,7 @@ JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id, JDWP::ObjectId Dbg::GetSystemThreadGroupId() { ScopedObjectAccessUnchecked soa(Thread::Current()); - ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup); + ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup); ObjPtr<mirror::Object> group = f->GetObject(f->GetDeclaringClass()); return gRegistry->Add(group); } @@ -2256,14 +2257,13 @@ JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) { return JDWP::ERR_NONE; } -static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa, - mirror::Object* desired_thread_group, mirror::Object* peer) +static bool IsInDesiredThreadGroup(mirror::Object* desired_thread_group, mirror::Object* peer) REQUIRES_SHARED(Locks::mutator_lock_) { // Do we want threads from all thread groups? if (desired_thread_group == nullptr) { return true; } - ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group); + ArtField* thread_group_field = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group); DCHECK(thread_group_field != nullptr); ObjPtr<mirror::Object> group = thread_group_field->GetObject(peer); return (group == desired_thread_group); @@ -2296,7 +2296,7 @@ void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* // Doing so might help us report ZOMBIE threads too. 
continue; } - if (IsInDesiredThreadGroup(soa, thread_group, peer)) { + if (IsInDesiredThreadGroup(thread_group, peer)) { thread_ids->push_back(gRegistry->Add(peer)); } } @@ -4093,7 +4093,7 @@ void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInv // Invoke the method. ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read())); - JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m), + JValue result = InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(m), reinterpret_cast<jvalue*>(pReq->arg_values.get())); // Prepare JDWP ids for the reply. @@ -4371,7 +4371,7 @@ void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) { CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type; ScopedObjectAccessUnchecked soa(Thread::Current()); StackHandleScope<1> hs(soa.Self()); - Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa))); + Handle<mirror::String> name(hs.NewHandle(t->GetThreadName())); size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0; const jchar* chars = (name.Get() != nullptr) ? name->GetValue() : nullptr; bool is_compressed = (name.Get() != nullptr) ? 
name->IsCompressed() : false; @@ -5117,13 +5117,11 @@ jbyteArray Dbg::GetRecentAllocations() { } ArtMethod* DeoptimizationRequest::Method() const { - ScopedObjectAccessUnchecked soa(Thread::Current()); - return soa.DecodeMethod(method_); + return jni::DecodeArtMethod(method_); } void DeoptimizationRequest::SetMethod(ArtMethod* m) { - ScopedObjectAccessUnchecked soa(Thread::Current()); - method_ = soa.EncodeMethod(m); + method_ = jni::EncodeArtMethod(m); } void Dbg::VisitRoots(RootVisitor* visitor) { diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc index 0765465db4..835f456c9b 100644 --- a/runtime/dex_file_annotations.cc +++ b/runtime/dex_file_annotations.cc @@ -22,6 +22,7 @@ #include "art_method-inl.h" #include "class_linker-inl.h" #include "dex_file-inl.h" +#include "jni_internal.h" #include "jvalue-inl.h" #include "mirror/field.h" #include "mirror/method.h" @@ -281,7 +282,7 @@ mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass, const uint JValue result; ArtMethod* create_annotation_method = - soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation); + jni::DecodeArtMethod(WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation); uint32_t args[2] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(annotation_class.Get())), static_cast<uint32_t>(reinterpret_cast<uintptr_t>(h_element_array.Get())) }; create_annotation_method->Invoke(self, args, sizeof(args), &result, "LLL"); @@ -633,7 +634,7 @@ mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass, JValue result; ArtMethod* annotation_member_init = - soa.DecodeMethod(WellKnownClasses::libcore_reflect_AnnotationMember_init); + jni::DecodeArtMethod(WellKnownClasses::libcore_reflect_AnnotationMember_init); uint32_t args[5] = { static_cast<uint32_t>(reinterpret_cast<uintptr_t>(new_member.Get())), static_cast<uint32_t>(reinterpret_cast<uintptr_t>(string_name.Get())), 
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(value_object.Get())), diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc index e39287018a..3801c228c0 100644 --- a/runtime/dex_file_verifier_test.cc +++ b/runtime/dex_file_verifier_test.cc @@ -58,7 +58,7 @@ class DexFileVerifierTest : public CommonRuntimeTest { void VerifyModification(const char* dex_file_base64_content, const char* location, - std::function<void(DexFile*)> f, + const std::function<void(DexFile*)>& f, const char* expected_error) { size_t length; std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(dex_file_base64_content, &length)); diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc index 2732d687b5..083d5786ce 100644 --- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc @@ -27,4 +27,12 @@ extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* r return klass->IsAssignableFrom(ref_class) ? 1 : 0; } +// Is assignable test for code, won't throw. Null and equality test already performed. +extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class) + REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK(obj != nullptr); + DCHECK(ref_class != nullptr); + return obj->InstanceOf(ref_class) ? 1 : 0; +} + } // namespace art diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h index cfa5325e45..64030f36bc 100644 --- a/runtime/entrypoints/quick/quick_default_externs.h +++ b/runtime/entrypoints/quick/quick_default_externs.h @@ -31,7 +31,7 @@ class ArtMethod; // These are extern declarations of assembly stubs with common names. // Cast entrypoints. -extern "C" void art_quick_check_cast(const art::mirror::Class*, const art::mirror::Class*); +extern "C" void art_quick_check_instance_of(art::mirror::Object*, art::mirror::Class*); // DexCache entrypoints. 
extern "C" void* art_quick_initialize_static_storage(uint32_t); diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h index 3cfee45462..dd8fe55420 100644 --- a/runtime/entrypoints/quick/quick_entrypoints_list.h +++ b/runtime/entrypoints/quick/quick_entrypoints_list.h @@ -33,8 +33,8 @@ V(AllocStringFromChars, void*, int32_t, int32_t, void*) \ V(AllocStringFromString, void*, void*) \ \ - V(InstanceofNonTrivial, size_t, const mirror::Class*, const mirror::Class*) \ - V(CheckCast, void, const mirror::Class*, const mirror::Class*) \ + V(InstanceofNonTrivial, size_t, mirror::Class*, mirror::Class*) \ + V(CheckInstanceOf, void, mirror::Object*, mirror::Class*) \ \ V(InitializeStaticStorage, void*, uint32_t) \ V(InitializeTypeAndVerifyAccess, void*, uint32_t) \ diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc index a205b17f1b..c8ee99a5d9 100644 --- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -111,6 +111,14 @@ extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type, self->QuickDeliverException(); } +extern "C" NO_RETURN void artThrowClassCastExceptionForObject(mirror::Object* obj, + mirror::Class* dest_type, + Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + DCHECK(obj != nullptr); + artThrowClassCastException(dest_type, obj->GetClass(), self); +} + extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value, Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc index cdb1051e08..b0463d7f11 100644 --- a/runtime/entrypoints_order_test.cc +++ b/runtime/entrypoints_order_test.cc @@ -174,8 +174,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest { sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, 
pAllocStringFromString, pInstanceofNonTrivial, sizeof(void*)); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckCast, sizeof(void*)); - EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckCast, pInitializeStaticStorage, sizeof(void*)); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInstanceofNonTrivial, pCheckInstanceOf, sizeof(void*)); + EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCheckInstanceOf, pInitializeStaticStorage, + sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeStaticStorage, pInitializeTypeAndVerifyAccess, sizeof(void*)); EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType, diff --git a/runtime/image.cc b/runtime/image.cc index 299d5fd13f..bd5ba9350e 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -25,7 +25,7 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '1', '\0' }; +const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '2', '\0' }; ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index 4843c4dc59..5e4bb4145d 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -934,16 +934,12 @@ inline bool DoInvokePolymorphic(Thread* self, // frame, which means that it is unknown at this point. We perform these // checks inside DoCallPolymorphic right before we do the actualy invoke. } else if (handle_kind == kInvokeDirect) { - if (called_method->IsConstructor()) { - // TODO(narayan) : We need to handle the case where the target method is a - // constructor here. - UNIMPLEMENTED(FATAL) << "Direct invokes for constructors are not implemented yet."; - return false; + // String constructors are a special case, they are replaced with StringFactory + // methods. 
+ if (called_method->IsConstructor() && called_method->GetDeclaringClass()->IsStringClass()) { + DCHECK(handle_type->GetRType()->IsStringClass()); + called_method = WellKnownClasses::StringInitToStringFactory(called_method); } - - // Nothing special to do in the case where we're not dealing with a - // constructor. It's a private method, and we've already access checked at - // the point of creating the handle. } else if (handle_kind == kInvokeSuper) { ObjPtr<mirror::Class> declaring_class = called_method->GetDeclaringClass(); @@ -1148,8 +1144,6 @@ static inline bool DoCallPolymorphic(ArtMethod* called_method, uint32_t (&arg)[Instruction::kMaxVarArgRegs], uint32_t first_src_reg, const MethodHandleKind handle_kind) { - // TODO(narayan): Wire in the String.init hacks. - // Compute method information. const DexFile::CodeItem* code_item = called_method->GetCodeItem(); diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index 0217a67559..01a2ad8f23 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -157,14 +157,14 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class, ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static"); return nullptr; } - return soa.EncodeMethod(method); + return jni::EncodeArtMethod(method); } static ObjPtr<mirror::ClassLoader> GetClassLoader(const ScopedObjectAccess& soa) REQUIRES_SHARED(Locks::mutator_lock_) { ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr); // If we are running Runtime.nativeLoad, use the overriding ClassLoader it set. - if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) { + if (method == jni::DecodeArtMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) { return soa.Decode<mirror::ClassLoader>(soa.Self()->GetClassLoaderOverride()); } // If we have a method, use its ClassLoader for context. 
@@ -235,7 +235,7 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con sig, name, c->GetDescriptor(&temp)); return nullptr; } - return soa.EncodeField(field); + return jni::EncodeArtField(field); } static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start, @@ -368,7 +368,7 @@ class JNI { static jmethodID FromReflectedMethod(JNIEnv* env, jobject jlr_method) { CHECK_NON_NULL_ARGUMENT(jlr_method); ScopedObjectAccess soa(env); - return soa.EncodeMethod(ArtMethod::FromReflectedMethod(soa, jlr_method)); + return jni::EncodeArtMethod(ArtMethod::FromReflectedMethod(soa, jlr_method)); } static jfieldID FromReflectedField(JNIEnv* env, jobject jlr_field) { @@ -380,13 +380,13 @@ class JNI { return nullptr; } ObjPtr<mirror::Field> field = ObjPtr<mirror::Field>::DownCast(obj_field); - return soa.EncodeField(field->GetArtField()); + return jni::EncodeArtField(field->GetArtField()); } static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) { CHECK_NON_NULL_ARGUMENT(mid); ScopedObjectAccess soa(env); - ArtMethod* m = soa.DecodeMethod(mid); + ArtMethod* m = jni::DecodeArtMethod(mid); mirror::Executable* method; DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize); DCHECK(!Runtime::Current()->IsActiveTransaction()); @@ -401,7 +401,7 @@ class JNI { static jobject ToReflectedField(JNIEnv* env, jclass, jfieldID fid, jboolean) { CHECK_NON_NULL_ARGUMENT(fid); ScopedObjectAccess soa(env); - ArtField* f = soa.DecodeField(fid); + ArtField* f = jni::DecodeArtField(fid); return soa.AddLocalReference<jobject>( mirror::Field::CreateFromArtField<kRuntimePointerSize>(soa.Self(), f, true)); } @@ -631,8 +631,8 @@ class JNI { } if (c->IsStringClass()) { // Replace calls to String.<init> with equivalent StringFactory call. 
- jmethodID sf_mid = soa.EncodeMethod( - WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid))); + jmethodID sf_mid = jni::EncodeArtMethod( + WellKnownClasses::StringInitToStringFactory(jni::DecodeArtMethod(mid))); return CallStaticObjectMethodV(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args); } ObjPtr<mirror::Object> result = c->AllocObject(soa.Self()); @@ -658,8 +658,8 @@ class JNI { } if (c->IsStringClass()) { // Replace calls to String.<init> with equivalent StringFactory call. - jmethodID sf_mid = soa.EncodeMethod( - WellKnownClasses::StringInitToStringFactory(soa.DecodeMethod(mid))); + jmethodID sf_mid = jni::EncodeArtMethod( + WellKnownClasses::StringInitToStringFactory(jni::DecodeArtMethod(mid))); return CallStaticObjectMethodA(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args); } ObjPtr<mirror::Object> result = c->AllocObject(soa.Self()); @@ -1237,14 +1237,14 @@ class JNI { CHECK_NON_NULL_ARGUMENT(fid); ScopedObjectAccess soa(env); ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(obj); - ArtField* f = soa.DecodeField(fid); + ArtField* f = jni::DecodeArtField(fid); return soa.AddLocalReference<jobject>(f->GetObject(o)); } static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) { CHECK_NON_NULL_ARGUMENT(fid); ScopedObjectAccess soa(env); - ArtField* f = soa.DecodeField(fid); + ArtField* f = jni::DecodeArtField(fid); return soa.AddLocalReference<jobject>(f->GetObject(f->GetDeclaringClass())); } @@ -1254,7 +1254,7 @@ class JNI { ScopedObjectAccess soa(env); ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(java_object); ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value); - ArtField* f = soa.DecodeField(fid); + ArtField* f = jni::DecodeArtField(fid); f->SetObject<false>(o, v); } @@ -1262,7 +1262,7 @@ class JNI { CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); ScopedObjectAccess soa(env); ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value); - ArtField* f = soa.DecodeField(fid); 
+ ArtField* f = jni::DecodeArtField(fid); f->SetObject<false>(f->GetDeclaringClass(), v); } @@ -1271,13 +1271,13 @@ class JNI { CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \ ScopedObjectAccess soa(env); \ ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \ - ArtField* f = soa.DecodeField(fid); \ + ArtField* f = jni::DecodeArtField(fid); \ return f->Get ##fn (o) #define GET_STATIC_PRIMITIVE_FIELD(fn) \ CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \ ScopedObjectAccess soa(env); \ - ArtField* f = soa.DecodeField(fid); \ + ArtField* f = jni::DecodeArtField(fid); \ return f->Get ##fn (f->GetDeclaringClass()) #define SET_PRIMITIVE_FIELD(fn, instance, value) \ @@ -1285,13 +1285,13 @@ class JNI { CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \ ScopedObjectAccess soa(env); \ ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \ - ArtField* f = soa.DecodeField(fid); \ + ArtField* f = jni::DecodeArtField(fid); \ f->Set ##fn <false>(o, value) #define SET_STATIC_PRIMITIVE_FIELD(fn, value) \ CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \ ScopedObjectAccess soa(env); \ - ArtField* f = soa.DecodeField(fid); \ + ArtField* f = jni::DecodeArtField(fid); \ f->Set ##fn <false>(f->GetDeclaringClass(), value) static jboolean GetBooleanField(JNIEnv* env, jobject obj, jfieldID fid) { diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h index b829934dd7..b3837c409d 100644 --- a/runtime/jni_internal.h +++ b/runtime/jni_internal.h @@ -20,6 +20,8 @@ #include <jni.h> #include <iosfwd> +#include "base/macros.h" + #ifndef NATIVE_METHOD #define NATIVE_METHOD(className, functionName, signature) \ { #functionName, signature, reinterpret_cast<void*>(className ## _ ## functionName) } @@ -36,6 +38,9 @@ namespace art { +class ArtField; +class ArtMethod; + const JNINativeInterface* GetJniNativeInterface(); const JNINativeInterface* GetRuntimeShutdownNativeInterface(); @@ -46,6 +51,29 @@ void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINat int 
ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause); +namespace jni { + +ALWAYS_INLINE +static inline ArtField* DecodeArtField(jfieldID fid) { + return reinterpret_cast<ArtField*>(fid); +} + +ALWAYS_INLINE +static inline jfieldID EncodeArtField(ArtField* field) { + return reinterpret_cast<jfieldID>(field); +} + +ALWAYS_INLINE +static inline jmethodID EncodeArtMethod(ArtMethod* art_method) { + return reinterpret_cast<jmethodID>(art_method); +} + +ALWAYS_INLINE +static inline ArtMethod* DecodeArtMethod(jmethodID method_id) { + return reinterpret_cast<ArtMethod*>(method_id); +} + +} // namespace jni } // namespace art std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs); diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc index 8d85425c10..adf35b6f01 100644 --- a/runtime/native/dalvik_system_VMDebug.cc +++ b/runtime/native/dalvik_system_VMDebug.cc @@ -420,8 +420,10 @@ static jobject VMDebug_getRuntimeStatInternal(JNIEnv* env, jclass, jint statId) } } -static bool SetRuntimeStatValue(JNIEnv* env, jobjectArray result, VMDebugRuntimeStatId id, - std::string value) { +static bool SetRuntimeStatValue(JNIEnv* env, + jobjectArray result, + VMDebugRuntimeStatId id, + const std::string& value) { ScopedLocalRef<jstring> jvalue(env, env->NewStringUTF(value.c_str())); if (jvalue.get() == nullptr) { return false; diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc index 1b128fb187..73b81a71f8 100644 --- a/runtime/native/java_lang_reflect_Executable.cc +++ b/runtime/native/java_lang_reflect_Executable.cc @@ -136,7 +136,7 @@ static jobjectArray Executable_getParameters0(JNIEnv* env, jobject javaMethod) { Handle<mirror::Class> parameter_class = hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_reflect_Parameter)); ArtMethod* parameter_init = - soa.DecodeMethod(WellKnownClasses::java_lang_reflect_Parameter_init); + 
jni::DecodeArtMethod(WellKnownClasses::java_lang_reflect_Parameter_init); // Mutable handles used in the loop below to ensure cleanup without scaling the number of // handles by the number of parameters. diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc index 5ab6097aa4..c58854b13e 100644 --- a/runtime/native_bridge_art_interface.cc +++ b/runtime/native_bridge_art_interface.cc @@ -25,6 +25,7 @@ #include "base/logging.h" #include "base/macros.h" #include "dex_file-inl.h" +#include "jni_internal.h" #include "mirror/class-inl.h" #include "scoped_thread_state_change-inl.h" #include "sigchain.h" @@ -33,7 +34,7 @@ namespace art { static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) { ScopedObjectAccess soa(env); - ArtMethod* m = soa.DecodeMethod(mid); + ArtMethod* m = jni::DecodeArtMethod(mid); return m->GetShorty(); } @@ -90,14 +91,14 @@ static android::NativeBridgeRuntimeCallbacks native_bridge_art_callbacks_ { GetMethodShorty, GetNativeMethodCount, GetNativeMethods }; -bool LoadNativeBridge(std::string& native_bridge_library_filename) { +bool LoadNativeBridge(const std::string& native_bridge_library_filename) { VLOG(startup) << "Runtime::Setup native bridge library: " << (native_bridge_library_filename.empty() ? "(empty)" : native_bridge_library_filename); return android::LoadNativeBridge(native_bridge_library_filename.c_str(), &native_bridge_art_callbacks_); } -void PreInitializeNativeBridge(std::string dir) { +void PreInitializeNativeBridge(const std::string& dir) { VLOG(startup) << "Runtime::Pre-initialize native bridge"; #ifndef __APPLE__ // Mac OS does not support CLONE_NEWNS. if (unshare(CLONE_NEWNS) == -1) { diff --git a/runtime/native_bridge_art_interface.h b/runtime/native_bridge_art_interface.h index 090cddb9b6..c86e5da0df 100644 --- a/runtime/native_bridge_art_interface.h +++ b/runtime/native_bridge_art_interface.h @@ -26,10 +26,10 @@ namespace art { // Mirror libnativebridge interface. 
Done to have the ART callbacks out of line, and not require // the system/core header file in other files. -bool LoadNativeBridge(std::string& native_bridge_library_filename); +bool LoadNativeBridge(const std::string& native_bridge_library_filename); // This is mostly for testing purposes, as in a full system this is called by Zygote code. -void PreInitializeNativeBridge(std::string dir); +void PreInitializeNativeBridge(const std::string& dir); void InitializeNativeBridge(JNIEnv* env, const char* instruction_set); diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc index 00ab5771da..23768899bd 100644 --- a/runtime/native_stack_dump.cc +++ b/runtime/native_stack_dump.cc @@ -256,7 +256,7 @@ static void Addr2line(const std::string& map_src, Drain(2U, prefix, pipe, os); } -static bool RunCommand(std::string cmd) { +static bool RunCommand(const std::string& cmd) { FILE* stream = popen(cmd.c_str(), "r"); if (stream) { pclose(stream); diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc index d18e9464e6..d4337b971b 100644 --- a/runtime/oat_file_assistant_test.cc +++ b/runtime/oat_file_assistant_test.cc @@ -999,7 +999,7 @@ TEST_F(OatFileAssistantTest, GenNoDex) { // Turn an absolute path into a path relative to the current working // directory. 
-static std::string MakePathRelative(std::string target) { +static std::string MakePathRelative(const std::string& target) { char buf[MAXPATHLEN]; std::string cwd = getcwd(buf, MAXPATHLEN); diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc index cf9efe0782..651a6ee681 100644 --- a/runtime/oat_file_manager.cc +++ b/runtime/oat_file_manager.cc @@ -28,6 +28,7 @@ #include "gc/scoped_gc_critical_section.h" #include "gc/space/image_space.h" #include "handle_scope-inl.h" +#include "jni_internal.h" #include "mirror/class_loader.h" #include "oat_file_assistant.h" #include "obj_ptr-inl.h" @@ -224,9 +225,10 @@ static void AddNext(/*inout*/DexFileAndClassPair* original, } } +template <typename T> static void IterateOverJavaDexFile(ObjPtr<mirror::Object> dex_file, ArtField* const cookie_field, - std::function<bool(const DexFile*)> fn) + const T& fn) REQUIRES_SHARED(Locks::mutator_lock_) { if (dex_file != nullptr) { mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray(); @@ -247,26 +249,27 @@ static void IterateOverJavaDexFile(ObjPtr<mirror::Object> dex_file, } } +template <typename T> static void IterateOverPathClassLoader( - ScopedObjectAccessAlreadyRunnable& soa, Handle<mirror::ClassLoader> class_loader, MutableHandle<mirror::ObjectArray<mirror::Object>> dex_elements, - std::function<bool(const DexFile*)> fn) REQUIRES_SHARED(Locks::mutator_lock_) { + const T& fn) REQUIRES_SHARED(Locks::mutator_lock_) { // Handle this step. // Handle as if this is the child PathClassLoader. // The class loader is a PathClassLoader which inherits from BaseDexClassLoader. // We need to get the DexPathList and loop through it. 
- ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie); + ArtField* const cookie_field = + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie); ArtField* const dex_file_field = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); ObjPtr<mirror::Object> dex_path_list = - soa.DecodeField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)-> - GetObject(class_loader.Get()); + jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)-> + GetObject(class_loader.Get()); if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) { // DexPathList has an array dexElements of Elements[] which each contain a dex file. ObjPtr<mirror::Object> dex_elements_obj = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList_dexElements)-> - GetObject(dex_path_list); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)-> + GetObject(dex_path_list); // Loop through each dalvik.system.DexPathList$Element's dalvik.system.DexFile and look // at the mCookie which is a DexFile vector. 
if (dex_elements_obj != nullptr) { @@ -323,7 +326,7 @@ static bool GetDexFilesFromClassLoader( hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr)); Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader)); - IterateOverPathClassLoader(soa, h_class_loader, dex_elements, GetDexFilesFn); + IterateOverPathClassLoader(h_class_loader, dex_elements, GetDexFilesFn); return true; } @@ -337,9 +340,10 @@ static void GetDexFilesFromDexElementsArray( return; } - ArtField* const cookie_field = soa.DecodeField(WellKnownClasses::dalvik_system_DexFile_cookie); + ArtField* const cookie_field = + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie); ArtField* const dex_file_field = - soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); + jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); ObjPtr<mirror::Class> const element_class = soa.Decode<mirror::Class>( WellKnownClasses::dalvik_system_DexPathList__Element); ObjPtr<mirror::Class> const dexfile_class = soa.Decode<mirror::Class>( diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc index d9031ea652..9d4b55411b 100644 --- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc +++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc @@ -277,7 +277,13 @@ class JvmtiFunctions { jobject initial_object, const jvmtiHeapCallbacks* callbacks, const void* user_data) { - return ERR(NOT_IMPLEMENTED); + HeapUtil heap_util(&gObjectTagTable); + return heap_util.FollowReferences(env, + heap_filter, + klass, + initial_object, + callbacks, + user_data); } static jvmtiError IterateThroughHeap(jvmtiEnv* env, diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h index 997cedb093..0296f1ad80 100644 --- a/runtime/openjdkjvmti/object_tagging.h +++ b/runtime/openjdkjvmti/object_tagging.h @@ -34,7 +34,7 @@ class EventHandler; class ObjectTagTable : public art::gc::SystemWeakHolder { public: explicit 
ObjectTagTable(EventHandler* event_handler) - : art::gc::SystemWeakHolder(art::LockLevel::kAllocTrackerLock), + : art::gc::SystemWeakHolder(kTaggingLockLevel), update_since_last_sweep_(false), event_handler_(event_handler) { } @@ -180,6 +180,10 @@ class ObjectTagTable : public art::gc::SystemWeakHolder { } }; + // The tag table is used when visiting roots. So it needs to have a low lock level. + static constexpr art::LockLevel kTaggingLockLevel = + static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1); + std::unordered_map<art::GcRoot<art::mirror::Object>, jlong, HashGcRoot, diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc index 6b207430ff..0eff469884 100644 --- a/runtime/openjdkjvmti/ti_heap.cc +++ b/runtime/openjdkjvmti/ti_heap.cc @@ -16,19 +16,25 @@ #include "ti_heap.h" +#include "art_field-inl.h" #include "art_jvmti.h" #include "base/macros.h" #include "base/mutex.h" #include "class_linker.h" #include "gc/heap.h" +#include "gc_root-inl.h" #include "jni_env_ext.h" +#include "jni_internal.h" #include "mirror/class.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" #include "object_callbacks.h" #include "object_tagging.h" #include "obj_ptr-inl.h" #include "runtime.h" #include "scoped_thread_state_change-inl.h" #include "thread-inl.h" +#include "thread_list.h" namespace openjdkjvmti { @@ -165,6 +171,466 @@ jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env ATTRIBUTE_UNUSED, return ERR(NONE); } +class FollowReferencesHelper FINAL { + public: + FollowReferencesHelper(HeapUtil* h, + art::ObjPtr<art::mirror::Object> initial_object ATTRIBUTE_UNUSED, + const jvmtiHeapCallbacks* callbacks, + const void* user_data) + : tag_table_(h->GetTags()), + callbacks_(callbacks), + user_data_(user_data), + start_(0), + stop_reports_(false) { + } + + void Init() + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + CollectAndReportRootsVisitor carrv(this, tag_table_, &worklist_, 
&visited_); + art::Runtime::Current()->VisitRoots(&carrv); + art::Runtime::Current()->VisitImageRoots(&carrv); + stop_reports_ = carrv.IsStopReports(); + + if (stop_reports_) { + worklist_.clear(); + } + } + + void Work() + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + // Currently implemented as a BFS. To lower overhead, we don't erase elements immediately + // from the head of the work list, instead postponing until there's a gap that's "large." + // + // Alternatively, we can implement a DFS and use the work list as a stack. + while (start_ < worklist_.size()) { + art::mirror::Object* cur_obj = worklist_[start_]; + start_++; + + if (start_ >= kMaxStart) { + worklist_.erase(worklist_.begin(), worklist_.begin() + start_); + start_ = 0; + } + + VisitObject(cur_obj); + + if (stop_reports_) { + break; + } + } + } + + private: + class CollectAndReportRootsVisitor FINAL : public art::RootVisitor { + public: + CollectAndReportRootsVisitor(FollowReferencesHelper* helper, + ObjectTagTable* tag_table, + std::vector<art::mirror::Object*>* worklist, + std::unordered_set<art::mirror::Object*>* visited) + : helper_(helper), + tag_table_(tag_table), + worklist_(worklist), + visited_(visited), + stop_reports_(false) {} + + void VisitRoots(art::mirror::Object*** roots, size_t count, const art::RootInfo& info) + OVERRIDE + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) { + for (size_t i = 0; i != count; ++i) { + AddRoot(*roots[i], info); + } + } + + void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots, + size_t count, + const art::RootInfo& info) + OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*helper_->tag_table_->GetAllowDisallowLock()) { + for (size_t i = 0; i != count; ++i) { + AddRoot(roots[i]->AsMirrorPtr(), info); + } + } + + bool IsStopReports() { + return stop_reports_; + } + + private: + void AddRoot(art::mirror::Object* 
root_obj, const art::RootInfo& info) + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + // We use visited_ to mark roots already so we do not need another set. + if (visited_->find(root_obj) == visited_->end()) { + visited_->insert(root_obj); + worklist_->push_back(root_obj); + } + ReportRoot(root_obj, info); + } + + jvmtiHeapReferenceKind GetReferenceKind(const art::RootInfo& info, + jvmtiHeapReferenceInfo* ref_info) + REQUIRES_SHARED(art::Locks::mutator_lock_) { + // TODO: Fill in ref_info. + memset(ref_info, 0, sizeof(jvmtiHeapReferenceInfo)); + + switch (info.GetType()) { + case art::RootType::kRootJNIGlobal: + return JVMTI_HEAP_REFERENCE_JNI_GLOBAL; + + case art::RootType::kRootJNILocal: + return JVMTI_HEAP_REFERENCE_JNI_LOCAL; + + case art::RootType::kRootJavaFrame: + return JVMTI_HEAP_REFERENCE_STACK_LOCAL; + + case art::RootType::kRootNativeStack: + case art::RootType::kRootThreadBlock: + case art::RootType::kRootThreadObject: + return JVMTI_HEAP_REFERENCE_THREAD; + + case art::RootType::kRootStickyClass: + case art::RootType::kRootInternedString: + // Note: this isn't a root in the RI. 
+ return JVMTI_HEAP_REFERENCE_SYSTEM_CLASS; + + case art::RootType::kRootMonitorUsed: + case art::RootType::kRootJNIMonitor: + return JVMTI_HEAP_REFERENCE_MONITOR; + + case art::RootType::kRootFinalizing: + case art::RootType::kRootDebugger: + case art::RootType::kRootReferenceCleanup: + case art::RootType::kRootVMInternal: + case art::RootType::kRootUnknown: + return JVMTI_HEAP_REFERENCE_OTHER; + } + LOG(FATAL) << "Unreachable"; + UNREACHABLE(); + } + + void ReportRoot(art::mirror::Object* root_obj, const art::RootInfo& info) + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + jvmtiHeapReferenceInfo ref_info; + jvmtiHeapReferenceKind kind = GetReferenceKind(info, &ref_info); + jint result = helper_->ReportReference(kind, &ref_info, nullptr, root_obj); + if ((result & JVMTI_VISIT_ABORT) != 0) { + stop_reports_ = true; + } + } + + private: + FollowReferencesHelper* helper_; + ObjectTagTable* tag_table_; + std::vector<art::mirror::Object*>* worklist_; + std::unordered_set<art::mirror::Object*>* visited_; + bool stop_reports_; + }; + + void VisitObject(art::mirror::Object* obj) + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + if (obj->IsClass()) { + VisitClass(obj->AsClass()); + return; + } + if (obj->IsArrayInstance()) { + VisitArray(obj); + return; + } + + // TODO: We'll probably have to rewrite this completely with our own visiting logic, if we + // want to have a chance of getting the field indices computed halfway efficiently. For + // now, ignore them altogether. 
+ + struct InstanceReferenceVisitor { + explicit InstanceReferenceVisitor(FollowReferencesHelper* helper_) + : helper(helper_), stop_reports(false) {} + + void operator()(art::mirror::Object* src, + art::MemberOffset field_offset, + bool is_static ATTRIBUTE_UNUSED) const + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*helper->tag_table_->GetAllowDisallowLock()) { + if (stop_reports) { + return; + } + + art::mirror::Object* trg = src->GetFieldObjectReferenceAddr(field_offset)->AsMirrorPtr(); + jvmtiHeapReferenceInfo reference_info; + memset(&reference_info, 0, sizeof(reference_info)); + + // TODO: Implement spec-compliant numbering. + reference_info.field.index = field_offset.Int32Value(); + + jvmtiHeapReferenceKind kind = + field_offset.Int32Value() == art::mirror::Object::ClassOffset().Int32Value() + ? JVMTI_HEAP_REFERENCE_CLASS + : JVMTI_HEAP_REFERENCE_FIELD; + const jvmtiHeapReferenceInfo* reference_info_ptr = + kind == JVMTI_HEAP_REFERENCE_CLASS ? nullptr : &reference_info; + + stop_reports = !helper->ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src, trg); + } + + void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) + const { + LOG(FATAL) << "Unreachable"; + } + void VisitRootIfNonNull( + art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) const { + LOG(FATAL) << "Unreachable"; + } + + // "mutable" required by the visitor API. + mutable FollowReferencesHelper* helper; + mutable bool stop_reports; + }; + + InstanceReferenceVisitor visitor(this); + // Visit references, not native roots. 
+ obj->VisitReferences<false>(visitor, art::VoidFunctor()); + + stop_reports_ = visitor.stop_reports; + } + + void VisitArray(art::mirror::Object* array) + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_CLASS, + nullptr, + array, + array->GetClass()); + if (stop_reports_) { + return; + } + + if (array->IsObjectArray()) { + art::mirror::ObjectArray<art::mirror::Object>* obj_array = + array->AsObjectArray<art::mirror::Object>(); + int32_t length = obj_array->GetLength(); + for (int32_t i = 0; i != length; ++i) { + art::mirror::Object* elem = obj_array->GetWithoutChecks(i); + if (elem != nullptr) { + jvmtiHeapReferenceInfo reference_info; + reference_info.array.index = i; + stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT, + &reference_info, + array, + elem); + if (stop_reports_) { + break; + } + } + } + } + } + + void VisitClass(art::mirror::Class* klass) + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + // TODO: Are erroneous classes reported? Are non-prepared ones? For now, just use resolved ones. + if (!klass->IsResolved()) { + return; + } + + // Superclass. + stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_SUPERCLASS, + nullptr, + klass, + klass->GetSuperClass()); + if (stop_reports_) { + return; + } + + // Directly implemented or extended interfaces. + art::Thread* self = art::Thread::Current(); + art::StackHandleScope<1> hs(self); + art::Handle<art::mirror::Class> h_klass(hs.NewHandle<art::mirror::Class>(klass)); + for (size_t i = 0; i < h_klass->NumDirectInterfaces(); ++i) { + art::ObjPtr<art::mirror::Class> inf_klass = + art::mirror::Class::GetDirectInterface(self, h_klass, i); + if (inf_klass == nullptr) { + // TODO: With a resolved class this should not happen... 
+ self->ClearException(); + break; + } + + stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_INTERFACE, + nullptr, + klass, + inf_klass.Ptr()); + if (stop_reports_) { + return; + } + } + + // Classloader. + // TODO: What about the boot classpath loader? We'll skip for now, but do we have to find the + // fake BootClassLoader? + if (klass->GetClassLoader() != nullptr) { + stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_CLASS_LOADER, + nullptr, + klass, + klass->GetClassLoader()); + if (stop_reports_) { + return; + } + } + DCHECK_EQ(h_klass.Get(), klass); + + // Declared static fields. + for (auto& field : klass->GetSFields()) { + if (!field.IsPrimitiveType()) { + art::ObjPtr<art::mirror::Object> field_value = field.GetObject(klass); + if (field_value != nullptr) { + jvmtiHeapReferenceInfo reference_info; + memset(&reference_info, 0, sizeof(reference_info)); + + // TODO: Implement spec-compliant numbering. + reference_info.field.index = field.GetOffset().Int32Value(); + + stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD, + &reference_info, + klass, + field_value.Ptr()); + if (stop_reports_) { + return; + } + } + } + } + } + + void MaybeEnqueue(art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) { + if (visited_.find(obj) == visited_.end()) { + worklist_.push_back(obj); + visited_.insert(obj); + } + } + + bool ReportReferenceMaybeEnqueue(jvmtiHeapReferenceKind kind, + const jvmtiHeapReferenceInfo* reference_info, + art::mirror::Object* referree, + art::mirror::Object* referrer) + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + jint result = ReportReference(kind, reference_info, referree, referrer); + if ((result & JVMTI_VISIT_ABORT) == 0) { + if ((result & JVMTI_VISIT_OBJECTS) != 0) { + MaybeEnqueue(referrer); + } + return true; + } else { + return false; + } + } + + jint ReportReference(jvmtiHeapReferenceKind kind, + const 
jvmtiHeapReferenceInfo* reference_info, + art::mirror::Object* referrer, + art::mirror::Object* referree) + REQUIRES_SHARED(art::Locks::mutator_lock_) + REQUIRES(!*tag_table_->GetAllowDisallowLock()) { + if (referree == nullptr || stop_reports_) { + return 0; + } + + const jlong class_tag = tag_table_->GetTagOrZero(referree->GetClass()); + const jlong referrer_class_tag = + referrer == nullptr ? 0 : tag_table_->GetTagOrZero(referrer->GetClass()); + const jlong size = static_cast<jlong>(referree->SizeOf()); + jlong tag = tag_table_->GetTagOrZero(referree); + jlong saved_tag = tag; + jlong referrer_tag = 0; + jlong saved_referrer_tag = 0; + jlong* referrer_tag_ptr; + if (referrer == nullptr) { + referrer_tag_ptr = nullptr; + } else { + if (referrer == referree) { + referrer_tag_ptr = &tag; + } else { + referrer_tag = saved_referrer_tag = tag_table_->GetTagOrZero(referrer); + referrer_tag_ptr = &referrer_tag; + } + } + jint length = -1; + if (referree->IsArrayInstance()) { + length = referree->AsArray()->GetLength(); + } + + jint result = callbacks_->heap_reference_callback(kind, + reference_info, + class_tag, + referrer_class_tag, + size, + &tag, + referrer_tag_ptr, + length, + const_cast<void*>(user_data_)); + + if (tag != saved_tag) { + tag_table_->Set(referree, tag); + } + if (referrer_tag != saved_referrer_tag) { + tag_table_->Set(referrer, referrer_tag); + } + + return result; + } + + ObjectTagTable* tag_table_; + const jvmtiHeapCallbacks* callbacks_; + const void* user_data_; + + std::vector<art::mirror::Object*> worklist_; + size_t start_; + static constexpr size_t kMaxStart = 1000000U; + + std::unordered_set<art::mirror::Object*> visited_; + + bool stop_reports_; + + friend class CollectAndReportRootsVisitor; +}; + +jvmtiError HeapUtil::FollowReferences(jvmtiEnv* env ATTRIBUTE_UNUSED, + jint heap_filter ATTRIBUTE_UNUSED, + jclass klass ATTRIBUTE_UNUSED, + jobject initial_object, + const jvmtiHeapCallbacks* callbacks, + const void* user_data) { + if (callbacks 
== nullptr) { + return ERR(NULL_POINTER); + } + + if (callbacks->array_primitive_value_callback != nullptr) { + // TODO: Implement. + return ERR(NOT_IMPLEMENTED); + } + + art::Thread* self = art::Thread::Current(); + art::ScopedObjectAccess soa(self); // Now we know we have the shared lock. + + art::Runtime::Current()->GetHeap()->IncrementDisableMovingGC(self); + { + art::ObjPtr<art::mirror::Object> o_initial = soa.Decode<art::mirror::Object>(initial_object); + + art::ScopedThreadSuspension sts(self, art::kWaitingForVisitObjects); + art::ScopedSuspendAll ssa("FollowReferences"); + + FollowReferencesHelper frh(this, o_initial, callbacks, user_data); + frh.Init(); + frh.Work(); + } + art::Runtime::Current()->GetHeap()->DecrementDisableMovingGC(self); + + return ERR(NONE); +} + jvmtiError HeapUtil::GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) { @@ -215,5 +681,4 @@ jvmtiError HeapUtil::ForceGarbageCollection(jvmtiEnv* env ATTRIBUTE_UNUSED) { return ERR(NONE); } - } // namespace openjdkjvmti diff --git a/runtime/openjdkjvmti/ti_heap.h b/runtime/openjdkjvmti/ti_heap.h index 570dd0c6ce..72ee097566 100644 --- a/runtime/openjdkjvmti/ti_heap.h +++ b/runtime/openjdkjvmti/ti_heap.h @@ -36,6 +36,13 @@ class HeapUtil { const jvmtiHeapCallbacks* callbacks, const void* user_data); + jvmtiError FollowReferences(jvmtiEnv* env, + jint heap_filter, + jclass klass, + jobject initial_object, + const jvmtiHeapCallbacks* callbacks, + const void* user_data); + static jvmtiError ForceGarbageCollection(jvmtiEnv* env); ObjectTagTable* GetTags() { diff --git a/runtime/openjdkjvmti/ti_method.cc b/runtime/openjdkjvmti/ti_method.cc index 6210936772..e391a9d680 100644 --- a/runtime/openjdkjvmti/ti_method.cc +++ b/runtime/openjdkjvmti/ti_method.cc @@ -34,6 +34,7 @@ #include "art_jvmti.h" #include "art_method-inl.h" #include "base/enums.h" +#include "jni_internal.h" #include "modifiers.h" #include "scoped_thread_state_change-inl.h" @@ -45,7 +46,7 @@ jvmtiError 
MethodUtil::GetMethodName(jvmtiEnv* env, char** signature_ptr, char** generic_ptr) { art::ScopedObjectAccess soa(art::Thread::Current()); - art::ArtMethod* art_method = soa.DecodeMethod(method); + art::ArtMethod* art_method = art::jni::DecodeArtMethod(method); art_method = art_method->GetInterfaceMethodIfProxy(art::kRuntimePointerSize); JvmtiUniquePtr name_copy; @@ -93,10 +94,10 @@ jvmtiError MethodUtil::GetMethodDeclaringClass(jvmtiEnv* env ATTRIBUTE_UNUSED, return ERR(NULL_POINTER); } - art::ScopedObjectAccess soa(art::Thread::Current()); - art::ArtMethod* art_method = soa.DecodeMethod(method); + art::ArtMethod* art_method = art::jni::DecodeArtMethod(method); // Note: No GetInterfaceMethodIfProxy, we want to actual class. + art::ScopedObjectAccess soa(art::Thread::Current()); art::mirror::Class* klass = art_method->GetDeclaringClass(); *declaring_class_ptr = soa.AddLocalReference<jclass>(klass); @@ -110,9 +111,7 @@ jvmtiError MethodUtil::GetMethodModifiers(jvmtiEnv* env ATTRIBUTE_UNUSED, return ERR(NULL_POINTER); } - art::ScopedObjectAccess soa(art::Thread::Current()); - art::ArtMethod* art_method = soa.DecodeMethod(method); - + art::ArtMethod* art_method = art::jni::DecodeArtMethod(method); uint32_t modifiers = art_method->GetAccessFlags(); // Note: Keep this code in sync with Executable.fixMethodFlags. 
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc index 33e677f4b1..6f8976f03d 100644 --- a/runtime/openjdkjvmti/ti_stack.cc +++ b/runtime/openjdkjvmti/ti_stack.cc @@ -37,6 +37,7 @@ #include "dex_file.h" #include "dex_file_annotations.h" #include "jni_env_ext.h" +#include "jni_internal.h" #include "mirror/class.h" #include "mirror/dex_cache.h" #include "scoped_thread_state_change-inl.h" @@ -64,7 +65,7 @@ struct GetStackTraceVisitor : public art::StackVisitor { if (start == 0) { m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize); - jmethodID id = soa.EncodeMethod(m); + jmethodID id = art::jni::EncodeArtMethod(m); art::mirror::DexCache* dex_cache = m->GetDexCache(); int32_t line_number = -1; diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc index 3443aea744..fa2983c19c 100644 --- a/runtime/openjdkjvmti/transform.cc +++ b/runtime/openjdkjvmti/transform.cc @@ -283,7 +283,7 @@ jvmtiError GetTransformationData(ArtJvmTiEnv* env, // Install the new dex file. // TODO do error checks for bad state (method in a stack, changes to number of methods/fields/etc). jvmtiError MoveTransformedFileIntoRuntime(jclass jklass, - std::string original_location, + const std::string& original_location, jint data_len, unsigned char* dex_data) { const char* dex_file_name = "Ldalvik/system/DexFile;"; diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h index 85bcb00eca..a76ed939b5 100644 --- a/runtime/openjdkjvmti/transform.h +++ b/runtime/openjdkjvmti/transform.h @@ -54,7 +54,7 @@ jvmtiError GetTransformationData(ArtJvmTiEnv* env, // Install the new dex file. 
jvmtiError MoveTransformedFileIntoRuntime(jclass jklass, - std::string original_location, + const std::string& original_location, jint data_len, unsigned char* dex_data); diff --git a/runtime/plugin.h b/runtime/plugin.h index 18f3977bd5..f077aaf3fb 100644 --- a/runtime/plugin.h +++ b/runtime/plugin.h @@ -34,7 +34,7 @@ using PluginDeinitializationFunction = bool (*)(); // single-threaded fashion so not much need class Plugin { public: - static Plugin Create(std::string lib) { + static Plugin Create(const std::string& lib) { return Plugin(lib); } @@ -66,7 +66,7 @@ class Plugin { } private: - explicit Plugin(std::string library) : library_(library), dlopen_handle_(nullptr) { } + explicit Plugin(const std::string& library) : library_(library), dlopen_handle_(nullptr) { } std::string library_; void* dlopen_handle_; diff --git a/runtime/reflection.cc b/runtime/reflection.cc index f88309baa1..3128380f76 100644 --- a/runtime/reflection.cc +++ b/runtime/reflection.cc @@ -453,7 +453,7 @@ JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject o return JValue(); } - ArtMethod* method = soa.DecodeMethod(mid); + ArtMethod* method = jni::DecodeArtMethod(mid); bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor(); if (is_string_init) { // Replace calls to String.<init> with equivalent StringFactory call. @@ -484,7 +484,7 @@ JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject o return JValue(); } - ArtMethod* method = soa.DecodeMethod(mid); + ArtMethod* method = jni::DecodeArtMethod(mid); bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor(); if (is_string_init) { // Replace calls to String.<init> with equivalent StringFactory call. 
@@ -516,7 +516,7 @@ JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnab } ObjPtr<mirror::Object> receiver = soa.Decode<mirror::Object>(obj); - ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); + ArtMethod* method = FindVirtualMethod(receiver, jni::DecodeArtMethod(mid)); bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor(); if (is_string_init) { // Replace calls to String.<init> with equivalent StringFactory call. @@ -548,7 +548,7 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab } ObjPtr<mirror::Object> receiver = soa.Decode<mirror::Object>(obj); - ArtMethod* method = FindVirtualMethod(receiver, soa.DecodeMethod(mid)); + ArtMethod* method = FindVirtualMethod(receiver, jni::DecodeArtMethod(mid)); bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor(); if (is_string_init) { // Replace calls to String.<init> with equivalent StringFactory call. 
@@ -739,8 +739,11 @@ ObjPtr<mirror::Object> BoxPrimitive(Primitive::Type src_class, const JValue& val arg_array.Append(value.GetI()); } - soa.DecodeMethod(m)->Invoke(soa.Self(), arg_array.GetArray(), arg_array.GetNumBytes(), - &result, shorty); + jni::DecodeArtMethod(m)->Invoke(soa.Self(), + arg_array.GetArray(), + arg_array.GetNumBytes(), + &result, + shorty); return result.GetL(); } diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc index 22076bbc05..e254dfe627 100644 --- a/runtime/reflection_test.cc +++ b/runtime/reflection_test.cc @@ -23,6 +23,7 @@ #include "art_method-inl.h" #include "base/enums.h" #include "common_compiler_test.h" +#include "jni_internal.h" #include "scoped_thread_state_change-inl.h" namespace art { @@ -136,7 +137,7 @@ class ReflectionTest : public CommonCompilerTest { ObjPtr<mirror::Object> receiver; ReflectionTestMakeExecutable(&method, &receiver, is_static, "nop", "()V"); ScopedLocalRef<jobject> receiver_ref(soa.Env(), soa.AddLocalReference<jobject>(receiver)); - InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), nullptr); + InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), nullptr); } void InvokeIdentityByteMethod(bool is_static) { @@ -148,20 +149,20 @@ class ReflectionTest : public CommonCompilerTest { jvalue args[1]; args[0].b = 0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(0, result.GetB()); args[0].b = -1; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(-1, result.GetB()); args[0].b = SCHAR_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); 
EXPECT_EQ(SCHAR_MAX, result.GetB()); static_assert(SCHAR_MIN == -128, "SCHAR_MIN unexpected"); args[0].b = SCHAR_MIN; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(SCHAR_MIN, result.GetB()); } @@ -174,19 +175,19 @@ class ReflectionTest : public CommonCompilerTest { jvalue args[1]; args[0].i = 0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(0, result.GetI()); args[0].i = -1; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(-1, result.GetI()); args[0].i = INT_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(INT_MAX, result.GetI()); args[0].i = INT_MIN; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(INT_MIN, result.GetI()); } @@ -199,19 +200,19 @@ class ReflectionTest : public CommonCompilerTest { jvalue args[1]; args[0].d = 0.0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(0.0, result.GetD()); args[0].d = -1.0; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(-1.0, result.GetD()); args[0].d = DBL_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), 
soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(DBL_MAX, result.GetD()); args[0].d = DBL_MIN; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(DBL_MIN, result.GetD()); } @@ -225,22 +226,22 @@ class ReflectionTest : public CommonCompilerTest { args[0].i = 1; args[1].i = 2; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(3, result.GetI()); args[0].i = -2; args[1].i = 5; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(3, result.GetI()); args[0].i = INT_MAX; args[1].i = INT_MIN; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(-1, result.GetI()); args[0].i = INT_MAX; args[1].i = INT_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(-2, result.GetI()); } @@ -255,31 +256,31 @@ class ReflectionTest : public CommonCompilerTest { args[0].i = 0; args[1].i = 0; args[2].i = 0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(0, result.GetI()); args[0].i = 1; args[1].i = 2; args[2].i = 3; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), 
jni::EncodeArtMethod(method), args); EXPECT_EQ(6, result.GetI()); args[0].i = -1; args[1].i = 2; args[2].i = -3; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(-2, result.GetI()); args[0].i = INT_MAX; args[1].i = INT_MIN; args[2].i = INT_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(2147483646, result.GetI()); args[0].i = INT_MAX; args[1].i = INT_MAX; args[2].i = INT_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(2147483645, result.GetI()); } @@ -295,35 +296,35 @@ class ReflectionTest : public CommonCompilerTest { args[1].i = 0; args[2].i = 0; args[3].i = 0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(0, result.GetI()); args[0].i = 1; args[1].i = 2; args[2].i = 3; args[3].i = 4; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(10, result.GetI()); args[0].i = -1; args[1].i = 2; args[2].i = -3; args[3].i = 4; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(2, result.GetI()); args[0].i = INT_MAX; args[1].i = INT_MIN; args[2].i = INT_MAX; args[3].i = INT_MIN; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), 
jni::EncodeArtMethod(method), args); EXPECT_EQ(-2, result.GetI()); args[0].i = INT_MAX; args[1].i = INT_MAX; args[2].i = INT_MAX; args[3].i = INT_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(-4, result.GetI()); } @@ -340,7 +341,7 @@ class ReflectionTest : public CommonCompilerTest { args[2].i = 0; args[3].i = 0; args[4].i = 0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(0, result.GetI()); args[0].i = 1; @@ -348,7 +349,7 @@ class ReflectionTest : public CommonCompilerTest { args[2].i = 3; args[3].i = 4; args[4].i = 5; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(15, result.GetI()); args[0].i = -1; @@ -356,7 +357,7 @@ class ReflectionTest : public CommonCompilerTest { args[2].i = -3; args[3].i = 4; args[4].i = -5; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(-3, result.GetI()); args[0].i = INT_MAX; @@ -364,7 +365,7 @@ class ReflectionTest : public CommonCompilerTest { args[2].i = INT_MAX; args[3].i = INT_MIN; args[4].i = INT_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(2147483645, result.GetI()); args[0].i = INT_MAX; @@ -372,7 +373,7 @@ class ReflectionTest : public CommonCompilerTest { args[2].i = INT_MAX; args[3].i = INT_MAX; args[4].i = INT_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + 
result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_EQ(2147483643, result.GetI()); } @@ -386,27 +387,27 @@ class ReflectionTest : public CommonCompilerTest { args[0].d = 0.0; args[1].d = 0.0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(0.0, result.GetD()); args[0].d = 1.0; args[1].d = 2.0; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(3.0, result.GetD()); args[0].d = 1.0; args[1].d = -2.0; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(-1.0, result.GetD()); args[0].d = DBL_MAX; args[1].d = DBL_MIN; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(1.7976931348623157e308, result.GetD()); args[0].d = DBL_MAX; args[1].d = DBL_MAX; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(INFINITY, result.GetD()); } @@ -421,19 +422,19 @@ class ReflectionTest : public CommonCompilerTest { args[0].d = 0.0; args[1].d = 0.0; args[2].d = 0.0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(0.0, result.GetD()); args[0].d = 1.0; args[1].d = 2.0; args[2].d = 3.0; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), 
args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(6.0, result.GetD()); args[0].d = 1.0; args[1].d = -2.0; args[2].d = 3.0; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(2.0, result.GetD()); } @@ -449,21 +450,21 @@ class ReflectionTest : public CommonCompilerTest { args[1].d = 0.0; args[2].d = 0.0; args[3].d = 0.0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(0.0, result.GetD()); args[0].d = 1.0; args[1].d = 2.0; args[2].d = 3.0; args[3].d = 4.0; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(10.0, result.GetD()); args[0].d = 1.0; args[1].d = -2.0; args[2].d = 3.0; args[3].d = -4.0; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(-2.0, result.GetD()); } @@ -480,7 +481,7 @@ class ReflectionTest : public CommonCompilerTest { args[2].d = 0.0; args[3].d = 0.0; args[4].d = 0.0; - JValue result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + JValue result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(0.0, result.GetD()); args[0].d = 1.0; @@ -488,7 +489,7 @@ class ReflectionTest : public CommonCompilerTest { args[2].d = 3.0; args[3].d = 4.0; args[4].d = 5.0; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), 
args); EXPECT_DOUBLE_EQ(15.0, result.GetD()); args[0].d = 1.0; @@ -496,7 +497,7 @@ class ReflectionTest : public CommonCompilerTest { args[2].d = 3.0; args[3].d = -4.0; args[4].d = 5.0; - result = InvokeWithJValues(soa, receiver_ref.get(), soa.EncodeMethod(method), args); + result = InvokeWithJValues(soa, receiver_ref.get(), jni::EncodeArtMethod(method), args); EXPECT_DOUBLE_EQ(3.0, result.GetD()); } @@ -531,7 +532,7 @@ TEST_F(ReflectionTest, StaticMainMethod) { jvalue args[1]; args[0].l = nullptr; - InvokeWithJValues(soa, nullptr, soa.EncodeMethod(method), args); + InvokeWithJValues(soa, nullptr, jni::EncodeArtMethod(method), args); } TEST_F(ReflectionTest, StaticNopMethod) { diff --git a/runtime/runtime.cc b/runtime/runtime.cc index b868563634..ca65c2bea3 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -559,7 +559,10 @@ static jobject CreateSystemClassLoader(Runtime* runtime) { "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size); CHECK(getSystemClassLoader != nullptr); - JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr); + JValue result = InvokeWithJValues(soa, + nullptr, + jni::EncodeArtMethod(getSystemClassLoader), + nullptr); JNIEnv* env = soa.Self()->GetJniEnv(); ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL())); CHECK(system_class_loader.get() != nullptr); diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h index bde23c8028..d4469f4357 100644 --- a/runtime/scoped_thread_state_change-inl.h +++ b/runtime/scoped_thread_state_change-inl.h @@ -86,30 +86,6 @@ inline ObjPtr<T, kPoison> ScopedObjectAccessAlreadyRunnable::Decode(jobject obj) return ObjPtr<T, kPoison>::DownCast(Self()->DecodeJObject(obj)); } -inline ArtField* ScopedObjectAccessAlreadyRunnable::DecodeField(jfieldID fid) const { - Locks::mutator_lock_->AssertSharedHeld(Self()); - DCHECK(IsRunnable()); // Don't work with raw objects in 
non-runnable states. - return reinterpret_cast<ArtField*>(fid); -} - -inline jfieldID ScopedObjectAccessAlreadyRunnable::EncodeField(ArtField* field) const { - Locks::mutator_lock_->AssertSharedHeld(Self()); - DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. - return reinterpret_cast<jfieldID>(field); -} - -inline ArtMethod* ScopedObjectAccessAlreadyRunnable::DecodeMethod(jmethodID mid) const { - Locks::mutator_lock_->AssertSharedHeld(Self()); - DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. - return reinterpret_cast<ArtMethod*>(mid); -} - -inline jmethodID ScopedObjectAccessAlreadyRunnable::EncodeMethod(ArtMethod* method) const { - Locks::mutator_lock_->AssertSharedHeld(Self()); - DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states. - return reinterpret_cast<jmethodID>(method); -} - inline bool ScopedObjectAccessAlreadyRunnable::IsRunnable() const { return self_->GetState() == kRunnable; } diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h index 04fd9141ea..b4992586ce 100644 --- a/runtime/scoped_thread_state_change.h +++ b/runtime/scoped_thread_state_change.h @@ -94,14 +94,6 @@ class ScopedObjectAccessAlreadyRunnable : public ValueObject { template<typename T, bool kPoison = kIsDebugBuild> ObjPtr<T, kPoison> Decode(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_); - ArtField* DecodeField(jfieldID fid) const REQUIRES_SHARED(Locks::mutator_lock_); - - jfieldID EncodeField(ArtField* field) const REQUIRES_SHARED(Locks::mutator_lock_); - - ArtMethod* DecodeMethod(jmethodID mid) const REQUIRES_SHARED(Locks::mutator_lock_); - - jmethodID EncodeMethod(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_); - ALWAYS_INLINE bool IsRunnable() const; protected: diff --git a/runtime/thread.cc b/runtime/thread.cc index 3f7d0868b0..8ce9661dfe 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -410,9 +410,9 @@ void* 
Thread::CreateCallback(void* arg) { self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr(); self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer); self->tlsPtr_.jpeer = nullptr; - self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str()); + self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str()); - ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority); + ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority); self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer)); Dbg::PostThreadStart(self); @@ -430,7 +430,7 @@ void* Thread::CreateCallback(void* arg) { Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa, mirror::Object* thread_peer) { - ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer); + ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer); Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer))); // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_ // to stop it from going away. 
@@ -562,7 +562,7 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz if (VLOG_IS_ON(threads)) { ScopedObjectAccess soa(env); - ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name); + ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name); ObjPtr<mirror::String> java_name = f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString(); std::string thread_name; @@ -823,7 +823,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) ScopedObjectAccess soa(self); StackHandleScope<1> hs(self); - MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa))); + MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName())); if (peer_thread_name.Get() == nullptr) { // The Thread constructor should have set the Thread.name to a // non-null value. However, because we can run without code @@ -834,7 +834,7 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) } else { InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority); } - peer_thread_name.Assign(GetThreadName(soa)); + peer_thread_name.Assign(GetThreadName()); } // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null. 
if (peer_thread_name.Get() != nullptr) { @@ -845,13 +845,13 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) template<bool kTransactionActive> void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group, jobject thread_name, jint thread_priority) { - soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)-> + jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)-> SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon); - soa.DecodeField(WellKnownClasses::java_lang_Thread_group)-> + jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)-> SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object>(thread_group)); - soa.DecodeField(WellKnownClasses::java_lang_Thread_name)-> + jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name)-> SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object>(thread_name)); - soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)-> + jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)-> SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority); } @@ -947,8 +947,8 @@ void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtr DumpStack(os, dump_native_stack, backtrace_map); } -mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const { - ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name); +mirror::String* Thread::GetThreadName() const { + ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name); if (tlsPtr_.opeer == nullptr) { return nullptr; } @@ -1306,17 +1306,18 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { // cause ScopedObjectAccessUnchecked to deadlock. 
if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) { ScopedObjectAccessUnchecked soa(self); - priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority) + priority = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority) ->GetInt(thread->tlsPtr_.opeer); - is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon) + is_daemon = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon) ->GetBoolean(thread->tlsPtr_.opeer); ObjPtr<mirror::Object> thread_group = - soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer); + jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) + ->GetObject(thread->tlsPtr_.opeer); if (thread_group != nullptr) { ArtField* group_name_field = - soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); + jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name); ObjPtr<mirror::String> group_name_string = group_name_field->GetObject(thread_group)->AsString(); group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>"; @@ -1792,10 +1793,10 @@ void Thread::Destroy() { // this.nativePeer = 0; if (Runtime::Current()->IsActiveTransaction()) { - soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer) + jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer) ->SetLong<true>(tlsPtr_.opeer, 0); } else { - soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer) + jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer) ->SetLong<false>(tlsPtr_.opeer, 0); } Dbg::PostThreadDeath(self); @@ -1803,7 +1804,7 @@ void Thread::Destroy() { // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone // who is waiting. 
ObjPtr<mirror::Object> lock = - soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer); + jni::DecodeArtField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer); // (This conditional is only needed for tests, where Thread.lock won't have been set.) if (lock != nullptr) { StackHandleScope<1> hs(self); @@ -1894,7 +1895,7 @@ void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) { void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) { // this.group.removeThread(this); // group can be null if we're in the compiler or a test. - ObjPtr<mirror::Object> ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group) + ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) ->GetObject(tlsPtr_.opeer); if (ogroup != nullptr) { ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup)); @@ -2414,7 +2415,7 @@ void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, ++i; } ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get())); - InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(exception_init_method), jv_args); + InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args); if (LIKELY(!IsExceptionPending())) { SetException(exception.Get()); } @@ -2503,7 +2504,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { QUICK_ENTRY_POINT_INFO(pAllocStringFromChars) QUICK_ENTRY_POINT_INFO(pAllocStringFromString) QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial) - QUICK_ENTRY_POINT_INFO(pCheckCast) + QUICK_ENTRY_POINT_INFO(pCheckInstanceOf) QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage) QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess) QUICK_ENTRY_POINT_INFO(pInitializeType) diff --git a/runtime/thread.h b/runtime/thread.h index 4f26803726..f3001be202 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -333,8 +333,7 @@ class Thread { } // Returns the 
java.lang.Thread's name, or null if this Thread* doesn't have a peer. - mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const - REQUIRES_SHARED(Locks::mutator_lock_); + mirror::String* GetThreadName() const REQUIRES_SHARED(Locks::mutator_lock_); // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code, // allocation, or locking. diff --git a/runtime/utf_test.cc b/runtime/utf_test.cc index 328492523f..d1e97515d3 100644 --- a/runtime/utf_test.cc +++ b/runtime/utf_test.cc @@ -113,8 +113,8 @@ TEST_F(UtfTest, CountModifiedUtf8Chars) { EXPECT_EQ(2u, CountModifiedUtf8Chars(reinterpret_cast<const char *>(kSurrogateEncoding))); } -static void AssertConversion(const std::vector<uint16_t> input, - const std::vector<uint8_t> expected) { +static void AssertConversion(const std::vector<uint16_t>& input, + const std::vector<uint8_t>& expected) { ASSERT_EQ(expected.size(), CountUtf8Bytes(&input[0], input.size())); std::vector<uint8_t> output(expected.size()); diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc index 53d717a8ff..35495867c7 100644 --- a/runtime/well_known_classes.cc +++ b/runtime/well_known_classes.cc @@ -22,6 +22,7 @@ #include "base/logging.h" #include "entrypoints/quick/quick_entrypoints_enum.h" +#include "jni_internal.h" #include "mirror/class.h" #include "mirror/throwable.h" #include "obj_ptr-inl.h" @@ -219,9 +220,9 @@ void WellKnownClasses::InitStringInit(JNIEnv* env) { ScopedObjectAccess soa(Thread::Current()); #define LOAD_STRING_INIT(init_runtime_name, init_signature, new_runtime_name, \ new_java_name, new_signature, ...) 
\ - init_runtime_name = soa.DecodeMethod( \ + init_runtime_name = jni::DecodeArtMethod( \ CacheMethod(env, java_lang_String, false, "<init>", init_signature)); \ - new_runtime_name = soa.DecodeMethod( \ + new_runtime_name = jni::DecodeArtMethod( \ CacheMethod(env, java_lang_StringFactory, true, new_java_name, new_signature)); STRING_INIT_LIST(LOAD_STRING_INIT) #undef LOAD_STRING_INIT @@ -239,8 +240,8 @@ void Thread::InitStringEntryPoints() { ArtMethod* WellKnownClasses::StringInitToStringFactory(ArtMethod* string_init) { #define TO_STRING_FACTORY(init_runtime_name, init_signature, new_runtime_name, \ new_java_name, new_signature, entry_point_name) \ - if (string_init == init_runtime_name) { \ - return new_runtime_name; \ + if (string_init == (init_runtime_name)) { \ + return (new_runtime_name); \ } STRING_INIT_LIST(TO_STRING_FACTORY) #undef TO_STRING_FACTORY @@ -251,7 +252,7 @@ ArtMethod* WellKnownClasses::StringInitToStringFactory(ArtMethod* string_init) { uint32_t WellKnownClasses::StringInitToEntryPoint(ArtMethod* string_init) { #define TO_ENTRY_POINT(init_runtime_name, init_signature, new_runtime_name, \ new_java_name, new_signature, entry_point_name) \ - if (string_init == init_runtime_name) { \ + if (string_init == (init_runtime_name)) { \ return kQuick ## entry_point_name; \ } STRING_INIT_LIST(TO_ENTRY_POINT) diff --git a/test/530-checker-lse/expected.txt b/test/530-checker-lse/expected.txt index e69de29bb2..ddae16aff4 100644 --- a/test/530-checker-lse/expected.txt +++ b/test/530-checker-lse/expected.txt @@ -0,0 +1 @@ +java.lang.ArrayIndexOutOfBoundsException: length=3; index=3 diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java index a61b9a0c06..9f4be6c227 100644 --- a/test/530-checker-lse/src/Main.java +++ b/test/530-checker-lse/src/Main.java @@ -18,6 +18,9 @@ class Circle { Circle(double radius) { this.radius = radius; } + public double getRadius() { + return radius; + } public double getArea() { return radius * radius * 
Math.PI; } @@ -758,6 +761,30 @@ public class Main { return area; } + /// CHECK-START: double Main.testDeoptimize(int[], double[], double) load_store_elimination (before) + /// CHECK: Deoptimize + /// CHECK: NewInstance + /// CHECK: Deoptimize + /// CHECK: NewInstance + + /// CHECK-START: double Main.testDeoptimize(int[], double[], double) load_store_elimination (after) + /// CHECK: Deoptimize + /// CHECK: NewInstance + /// CHECK: Deoptimize + /// CHECK-NOT: NewInstance + + private static double testDeoptimize(int[] iarr, double[] darr, double radius) { + iarr[0] = 1; // One HDeoptimize here. Not triggered. + iarr[1] = 1; + Circle circle1 = new Circle(radius); + iarr[2] = 1; + darr[0] = circle1.getRadius(); // One HDeoptimize here, which holds circle1 live. Triggered. + darr[1] = circle1.getRadius(); + darr[2] = circle1.getRadius(); + darr[3] = circle1.getRadius(); + return new Circle(Math.PI).getArea(); + } + static void assertIntEquals(int result, int expected) { if (expected != result) { throw new Error("Expected: " + expected + ", found: " + result); @@ -824,6 +851,20 @@ public class Main { assertFloatEquals(mF, 0f); assertDoubleEquals(Math.PI * Math.PI * Math.PI, getCircleArea(Math.PI, true)); assertDoubleEquals(0d, getCircleArea(Math.PI, false)); + + int[] iarray = {0, 0, 0}; + double[] darray = {0d, 0d, 0d}; + try { + assertDoubleEquals(Math.PI * Math.PI * Math.PI, testDeoptimize(iarray, darray, Math.PI)); + } catch (Exception e) { + System.out.println(e); + } + assertIntEquals(iarray[0], 1); + assertIntEquals(iarray[1], 1); + assertIntEquals(iarray[2], 1); + assertDoubleEquals(darray[0], Math.PI); + assertDoubleEquals(darray[1], Math.PI); + assertDoubleEquals(darray[2], Math.PI); } static boolean sFlag; diff --git a/test/530-checker-lse2/expected.txt b/test/530-checker-lse2/expected.txt new file mode 100644 index 0000000000..e18fc7e1f8 --- /dev/null +++ b/test/530-checker-lse2/expected.txt @@ -0,0 +1,8 @@ +Start.... 
+r = 9.649776E8 +mZ = false +mI = 0 +mJ = -576460752303423488 +mF = NaN +mD = NaN +Done.... diff --git a/test/530-checker-lse2/info.txt b/test/530-checker-lse2/info.txt new file mode 100644 index 0000000000..8dd3f502bf --- /dev/null +++ b/test/530-checker-lse2/info.txt @@ -0,0 +1,2 @@ +Checker test for testing store/allocation elimination in presence of +HDeoptimize. diff --git a/test/530-checker-lse2/src/Main.java b/test/530-checker-lse2/src/Main.java new file mode 100644 index 0000000000..0fe3d873ea --- /dev/null +++ b/test/530-checker-lse2/src/Main.java @@ -0,0 +1,208 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Arrays; + +// Modified from a fuzz test. 
+public class Main { + + private interface X { + int x(); + } + + private class A { + public int a() { + return (+ (Math.multiplyExact(mI, mI))); + } + } + + private class B extends A implements X { + public int a() { + return super.a() + ((int) (Math.max(364746077.0f, ((float) mD)))); + } + public int x() { + return (mI >> (mI++)); + } + } + + private static class C implements X { + public static int s() { + return 671468641; + } + public int c() { + return -383762838; + } + public int x() { + return -138813312; + } + } + + private A mA = new B(); + private B mB = new B(); + private X mBX = new B(); + private C mC = new C(); + private X mCX = new C(); + + private boolean mZ = false; + private int mI = 0; + private long mJ = 0; + private float mF = 0; + private double mD = 0; + + private boolean[] mArray = new boolean[576]; + + private Main() { + boolean a = false; + for (int i0 = 0; i0 < 576; i0++) { + mArray[i0] = a; + a = !a; + } + } + + /// CHECK-START: float Main.testMethod() load_store_elimination (before) + /// CHECK-DAG: Deoptimize + /// CHECK-DAG: Deoptimize + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-DAG: NewInstance + /// CHECK-NOT: NewInstance + + /// CHECK-START: float Main.testMethod() load_store_elimination (after) + /// CHECK-DAG: Deoptimize + /// CHECK-DAG: Deoptimize + /// CHECK-NOT: NewInstance + + private float testMethod() { + { + int lI0 = (-1456058746 << mI); + mD = ((double)(int)(double) mD); + for (int i0 = 56 - 1; i0 >= 0; i0--) { + mArray[i0] &= (Boolean.logicalOr(((true ? 
((boolean) new Boolean((mZ))) : mZ) || mArray[i0]), (mZ))); + mF *= (mF * mF); + if ((mZ ^ true)) { + mF *= ((float)(int)(float) 267827331.0f); + mZ ^= ((false & ((boolean) new Boolean(false))) | mZ); + for (int i1 = 576 - 1; i1 >= 0; i1--) { + mZ &= ((mArray[279]) | ((boolean) new Boolean(true))); + mD -= (--mD); + for (int i2 = 56 - 1; i2 >= 0; i2--) { + mF /= (mF - mF); + mI = (Math.min(((int) new Integer(mI)), (766538816 * (++mI)))); + mF += (mZ ? (mB.a()) : ((! mZ) ? -752042357.0f : (++mF))); + mJ |= ((long) new Long((-2084191070L + (mJ | mJ)))); + lI0 |= ((int) new Integer(((int) new Integer(mI)))); + if (((boolean) new Boolean(false))) { + mZ &= (mZ); + mF *= (mF--); + mD = (Double.POSITIVE_INFINITY); + mF += ((float)(int)(float) (-2026938813.0f * 638401585.0f)); + mJ = (--mJ); + for (int i3 = 56 - 1; i3 >= 0; i3--) { + mI &= (- mI); + mD = (--mD); + mArray[426] = (mZ || false); + mF -= (((this instanceof Main) ? mF : mF) + 976981405.0f); + mZ &= ((mZ) & (this instanceof Main)); + } + mZ ^= (Float.isFinite(-1975953895.0f)); + } else { + mJ /= ((long) (Math.nextDown(-1519600008.0f))); + mJ <<= (Math.round(1237681786.0)); + } + } + mArray[i0] &= (false || ((1256071300.0f != -353296391.0f) ? false : (mZ ^ mArray[i0]))); + mF *= (+ ((float) mD)); + for (int i2 = 0; i2 < 576; i2++) { + mD *= ((double) lI0); + lI0 = (lI0 & (Integer.MIN_VALUE)); + mF -= (--mF); + } + if ((this instanceof Main)) { + mZ ^= ((boolean) new Boolean(true)); + } else { + { + int lI1 = (mZ ? (--lI0) : 1099574344); + mJ >>= (Math.incrementExact(mJ)); + mJ = (~ -2103354070L); + } + } + } + } else { + mJ *= (- ((long) new Long(479832084L))); + mJ %= (Long.MAX_VALUE); + mD /= (--mD); + if ((mI > ((mBX.x()) << mI))) { + { + long lJ0 = (mJ--); + mI >>>= (mBX.x()); + } + mF = (+ 505094603.0f); + mD *= (((boolean) new Boolean((! false))) ? 
mD : 1808773781.0); + mI *= (Integer.MIN_VALUE); + for (int i1 = 576 - 1; i1 >= 0; i1--) { + if (((boolean) new Boolean(false))) { + mD += ((double)(float)(double) -1051436901.0); + } else { + mF -= ((float)(int)(float) (Float.min(mF, (mF--)))); + } + for (int i2 = 0; i2 < 576; i2++) { + mJ -= ((long) new Long(-1968644857L)); + mJ ^= (+ (mC.s())); + } + } + } else { + mF -= ((- mF) + -2145489966.0f); + } + mD -= (mD++); + mD = (949112777.0 * 1209996119.0); + } + mZ &= (Boolean.logicalAnd(true, ((mZ) & (((boolean) new Boolean(true)) && true)))); + } + } + return ((float) 964977619L); + } + + public static void main(String[] args) { + System.out.println("Start...."); + Main t = new Main(); + float r = 1883600237.0f; + try { + r = t.testMethod(); + } catch (Exception e) { + // Arithmetic, null pointer, index out of bounds, etc. + System.out.println("An exception was caught."); + } + System.out.println("r = " + r); + System.out.println("mZ = " + t.mZ); + System.out.println("mI = " + t.mI); + System.out.println("mJ = " + t.mJ); + System.out.println("mF = " + t.mF); + System.out.println("mD = " + t.mD); + System.out.println("Done...."); + } +} + diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java index 3c053cf5ea..95ecfb5726 100644 --- a/test/552-checker-sharpening/src/Main.java +++ b/test/552-checker-sharpening/src/Main.java @@ -252,27 +252,27 @@ public class Main { /// CHECK-START-X86: java.lang.String Main.$noinline$getBootImageString() sharpening (after) // Note: load kind depends on PIC/non-PIC // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress. 
- /// CHECK: LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} + /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} /// CHECK-START-X86_64: java.lang.String Main.$noinline$getBootImageString() sharpening (after) // Note: load kind depends on PIC/non-PIC // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress. - /// CHECK: LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} + /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} /// CHECK-START-ARM: java.lang.String Main.$noinline$getBootImageString() sharpening (after) // Note: load kind depends on PIC/non-PIC // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress. - /// CHECK: LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} + /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} /// CHECK-START-ARM64: java.lang.String Main.$noinline$getBootImageString() sharpening (after) // Note: load kind depends on PIC/non-PIC // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress. - /// CHECK: LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} + /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} /// CHECK-START-MIPS: java.lang.String Main.$noinline$getBootImageString() sharpening (after) // Note: load kind depends on PIC/non-PIC // TODO: Remove DexCacheViaMethod when read barrier config supports BootImageAddress. - /// CHECK: LoadString load_kind:{{BootImageAddress|DexCachePcRelative|DexCacheViaMethod}} + /// CHECK: LoadString load_kind:{{BootImageAddress|BssEntry|DexCacheViaMethod}} public static String $noinline$getBootImageString() { // Prevent inlining to avoid the string comparison being optimized away. 
diff --git a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali index 5d4aa56c8f..af43973073 100644 --- a/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali +++ b/test/559-checker-irreducible-loop/smali/IrreducibleLoop.smali @@ -196,7 +196,7 @@ const-class v0, LMain; if-ne v0, v2, :exit :other_loop_entry - const-class v1, LIrreducibleLoop; + const-class v1, Ljava/lang/Class; # LoadClass that can throw goto :loop_entry :exit return-object v0 @@ -250,7 +250,7 @@ const/4 v0, 0 if-ne p0, v0, :other_loop_entry :loop_entry - const-class v1, LIrreducibleLoop; + const-class v1, Ljava/lang/Class; # LoadClass that can throw if-ne v0, p0, :exit :other_loop_entry sub-int v1, p0, p0 @@ -286,7 +286,7 @@ .method public static licm3(III)I .registers 4 :loop_entry - const-class v0, LIrreducibleLoop; + const-class v0, Ljava/lang/Class; # LoadClass that can throw if-ne p1, p2, :exit goto :loop_body diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java index 8af3894b33..4de563469d 100644 --- a/test/570-checker-osr/src/Main.java +++ b/test/570-checker-osr/src/Main.java @@ -17,26 +17,6 @@ public class Main { public static void main(String[] args) { System.loadLibrary(args[0]); - Thread testThread = new Thread() { - public void run() { - performTest(); - } - }; - testThread.start(); - try { - testThread.join(20 * 1000); // 20s timeout. 
- } catch (InterruptedException ie) { - System.out.println("Interrupted."); - System.exit(1); - } - Thread.State state = testThread.getState(); - if (state != Thread.State.TERMINATED) { - System.out.println("Test timed out, current state: " + state); - System.exit(1); - } - } - - public static void performTest() { new SubMain(); if ($noinline$returnInt() != 53) { throw new Error("Unexpected return value"); diff --git a/test/624-checker-stringops/expected.txt b/test/624-checker-stringops/expected.txt new file mode 100644 index 0000000000..b0aad4deb5 --- /dev/null +++ b/test/624-checker-stringops/expected.txt @@ -0,0 +1 @@ +passed diff --git a/test/624-checker-stringops/info.txt b/test/624-checker-stringops/info.txt new file mode 100644 index 0000000000..64344ac4d7 --- /dev/null +++ b/test/624-checker-stringops/info.txt @@ -0,0 +1 @@ +Verify some properties of string operations represented by intrinsics. diff --git a/test/624-checker-stringops/src/Main.java b/test/624-checker-stringops/src/Main.java new file mode 100644 index 0000000000..34e82831a8 --- /dev/null +++ b/test/624-checker-stringops/src/Main.java @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Tests properties of some string operations represented by intrinsics. 
+ */ +public class Main { + + static final String ABC = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; + static final String XYZ = "XYZ"; + + // + // Variant intrinsics remain in the loop, but invariant references are hoisted out of the loop. + // + /// CHECK-START: int Main.liveIndexOf() licm (before) + /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOf loop:{{B\d+}} outer_loop:none + /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOfAfter loop:{{B\d+}} outer_loop:none + /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOf loop:{{B\d+}} outer_loop:none + /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOfAfter loop:{{B\d+}} outer_loop:none + // + /// CHECK-START: int Main.liveIndexOf() licm (after) + /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOf loop:{{B\d+}} outer_loop:none + /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOfAfter loop:{{B\d+}} outer_loop:none + /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOf loop:none + /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOfAfter loop:none + static int liveIndexOf() { + int k = ABC.length() + XYZ.length(); // does LoadString before loops + for (char c = 'A'; c <= 'Z'; c++) { + k += ABC.indexOf(c); + } + for (char c = 'A'; c <= 'Z'; c++) { + k += ABC.indexOf(c, 4); + } + for (char c = 'A'; c <= 'Z'; c++) { + k += ABC.indexOf(XYZ); + } + for (char c = 'A'; c <= 'Z'; c++) { + k += ABC.indexOf(XYZ, 2); + } + return k; + } + + // + // All dead intrinsics can be removed completely. 
+ // + /// CHECK-START: int Main.deadIndexOf() dead_code_elimination$initial (before) + /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOf loop:{{B\d+}} outer_loop:none + /// CHECK-DAG: InvokeVirtual intrinsic:StringIndexOfAfter loop:{{B\d+}} outer_loop:none + /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOf loop:{{B\d+}} outer_loop:none + /// CHECK-DAG: InvokeVirtual intrinsic:StringStringIndexOfAfter loop:{{B\d+}} outer_loop:none + // + /// CHECK-START: int Main.deadIndexOf() dead_code_elimination$initial (after) + /// CHECK-NOT: InvokeVirtual intrinsic:StringIndexOf + /// CHECK-NOT: InvokeVirtual intrinsic:StringIndexOfAfter + /// CHECK-NOT: InvokeVirtual intrinsic:StringStringIndexOf + /// CHECK-NOT: InvokeVirtual intrinsic:StringStringIndexOfAfter + static int deadIndexOf() { + int k = ABC.length() + XYZ.length(); // does LoadString before loops + for (char c = 'A'; c <= 'Z'; c++) { + int d = ABC.indexOf(c); + } + for (char c = 'A'; c <= 'Z'; c++) { + int d = ABC.indexOf(c, 4); + } + for (char c = 'A'; c <= 'Z'; c++) { + int d = ABC.indexOf(XYZ); + } + for (char c = 'A'; c <= 'Z'; c++) { + int d = ABC.indexOf(XYZ, 2); + } + return k; + } + + // + // Explicit null check on receiver, implicit null check on argument prevents hoisting. 
+ // + /// CHECK-START: int Main.indexOfExceptions(java.lang.String, java.lang.String) licm (after) + /// CHECK-DAG: <<String:l\d+>> NullCheck loop:<<Loop:B\d+>> outer_loop:none + /// CHECK-DAG: InvokeVirtual [<<String>>,{{l\d+}}] intrinsic:StringStringIndexOf loop:<<Loop>> outer_loop:none + static int indexOfExceptions(String s, String t) { + int k = 0; + for (char c = 'A'; c <= 'Z'; c++) { + k += s.indexOf(t); + } + return k; + } + + public static void main(String[] args) { + expectEquals(1865, liveIndexOf()); + expectEquals(29, deadIndexOf()); + try { + indexOfExceptions(null, XYZ); + throw new Error("Expected: NPE"); + } catch (NullPointerException e) { + } + try { + indexOfExceptions(ABC, null); + throw new Error("Expected: NPE"); + } catch (NullPointerException e) { + } + expectEquals(598, indexOfExceptions(ABC, XYZ)); + + System.out.println("passed"); + } + + private static void expectEquals(int expected, int result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } +} diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc index 0e09d1be09..ce929a6c46 100644 --- a/test/907-get-loaded-classes/get_loaded_classes.cc +++ b/test/907-get-loaded-classes/get_loaded_classes.cc @@ -27,6 +27,7 @@ #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" +#include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" namespace art { @@ -50,28 +51,14 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getLoadedClasses( return nullptr; } - ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/String")); - if (obj_class.get() == nullptr) { - return nullptr; - } - - jobjectArray ret = env->NewObjectArray(count, obj_class.get(), nullptr); - if (ret == nullptr) { - return ret; - } - - for (size_t i = 0; i < static_cast<size_t>(count); ++i) { + auto callback = [&](jint i) { jstring class_name = GetClassName(env, classes[i]); - 
env->SetObjectArrayElement(ret, static_cast<jint>(i), class_name); - env->DeleteLocalRef(class_name); - } - - // Need to: - // 1) Free the local references. - // 2) Deallocate. - for (size_t i = 0; i < static_cast<size_t>(count); ++i) { env->DeleteLocalRef(classes[i]); - } + return class_name; + }; + jobjectArray ret = CreateObjectArray(env, count, "java/lang/String", callback); + + // Need to Deallocate. jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(classes)); return ret; diff --git a/test/910-methods/methods.cc b/test/910-methods/methods.cc index cc6ad6793b..005cba60ac 100644 --- a/test/910-methods/methods.cc +++ b/test/910-methods/methods.cc @@ -23,6 +23,7 @@ #include "openjdkjvmti/jvmti.h" #include "ScopedLocalRef.h" +#include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" namespace art { @@ -43,23 +44,16 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getMethodName( return nullptr; } - ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/String")); - if (obj_class.get() == nullptr) { - return nullptr; - } - - jobjectArray ret = env->NewObjectArray(3, obj_class.get(), nullptr); - if (ret == nullptr) { - return ret; - } - - ScopedLocalRef<jstring> name_str(env, name == nullptr ? nullptr : env->NewStringUTF(name)); - ScopedLocalRef<jstring> sig_str(env, sig == nullptr ? nullptr : env->NewStringUTF(sig)); - ScopedLocalRef<jstring> gen_str(env, gen == nullptr ? nullptr : env->NewStringUTF(gen)); - - env->SetObjectArrayElement(ret, 0, name_str.get()); - env->SetObjectArrayElement(ret, 1, sig_str.get()); - env->SetObjectArrayElement(ret, 2, gen_str.get()); + auto callback = [&](jint i) { + if (i == 0) { + return name == nullptr ? nullptr : env->NewStringUTF(name); + } else if (i == 1) { + return sig == nullptr ? nullptr : env->NewStringUTF(sig); + } else { + return gen == nullptr ? 
nullptr : env->NewStringUTF(gen); + } + }; + jobjectArray ret = CreateObjectArray(env, 3, "java/lang/String", callback); // Need to deallocate the strings. if (name != nullptr) { diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc index da649cf8c9..a30416dec4 100644 --- a/test/911-get-stack-trace/stack_trace.cc +++ b/test/911-get-stack-trace/stack_trace.cc @@ -23,6 +23,7 @@ #include "jni.h" #include "openjdkjvmti/jvmti.h" #include "ScopedLocalRef.h" +#include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" namespace art { @@ -33,39 +34,36 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace( std::unique_ptr<jvmtiFrameInfo[]> frames(new jvmtiFrameInfo[max]); jint count; - jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count); - if (result != JVMTI_ERROR_NONE) { - char* err; - jvmti_env->GetErrorName(result, &err); - printf("Failure running GetStackTrace: %s\n", err); - return nullptr; - } - - ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/String")); - if (obj_class.get() == nullptr) { - return nullptr; - } - - jobjectArray ret = env->NewObjectArray(2 * count, obj_class.get(), nullptr); - if (ret == nullptr) { - return ret; + { + jvmtiError result = jvmti_env->GetStackTrace(thread, start, max, frames.get(), &count); + if (result != JVMTI_ERROR_NONE) { + char* err; + jvmti_env->GetErrorName(result, &err); + printf("Failure running GetStackTrace: %s\n", err); + return nullptr; + } } - for (size_t i = 0; i < static_cast<size_t>(count); ++i) { + auto callback = [&](jint i) -> jstring { + size_t method_index = static_cast<size_t>(i) / 2; char* name; char* sig; char* gen; - jvmtiError result2 = jvmti_env->GetMethodName(frames[i].method, &name, &sig, &gen); - if (result2 != JVMTI_ERROR_NONE) { - char* err; - jvmti_env->GetErrorName(result, &err); - printf("Failure running GetMethodName: %s\n", err); - return nullptr; + { + jvmtiError result2 = 
jvmti_env->GetMethodName(frames[method_index].method, &name, &sig, &gen); + if (result2 != JVMTI_ERROR_NONE) { + char* err; + jvmti_env->GetErrorName(result2, &err); + printf("Failure running GetMethodName: %s\n", err); + return nullptr; + } + } + jstring callback_result; + if (i % 2 == 0) { + callback_result = name == nullptr ? nullptr : env->NewStringUTF(name); + } else { + callback_result = sig == nullptr ? nullptr : env->NewStringUTF(sig); } - ScopedLocalRef<jstring> trace_name(env, name == nullptr ? nullptr : env->NewStringUTF(name)); - ScopedLocalRef<jstring> trace_sig(env, sig == nullptr ? nullptr : env->NewStringUTF(sig)); - env->SetObjectArrayElement(ret, static_cast<jint>(2 * i), trace_name.get()); - env->SetObjectArrayElement(ret, static_cast<jint>(2 * i + 1), trace_sig.get()); if (name != nullptr) { jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(name)); @@ -76,9 +74,9 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getStackTrace( if (gen != nullptr) { jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(gen)); } - } - - return ret; + return callback_result; + }; + return CreateObjectArray(env, 2 * count, "java/lang/String", callback); } // Don't do anything diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc index 4bf329c8e1..fbf32595f8 100644 --- a/test/912-classes/classes.cc +++ b/test/912-classes/classes.cc @@ -23,6 +23,7 @@ #include "openjdkjvmti/jvmti.h" #include "ScopedLocalRef.h" +#include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" namespace art { @@ -40,21 +41,14 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_getClassSignature( return nullptr; } - ScopedLocalRef<jclass> obj_class(env, env->FindClass("java/lang/String")); - if (obj_class.get() == nullptr) { - return nullptr; - } - - jobjectArray ret = env->NewObjectArray(2, obj_class.get(), nullptr); - if (ret == nullptr) { - return ret; - } - - ScopedLocalRef<jstring> sig_str(env, sig == nullptr ? 
nullptr : env->NewStringUTF(sig)); - ScopedLocalRef<jstring> gen_str(env, gen == nullptr ? nullptr : env->NewStringUTF(gen)); - - env->SetObjectArrayElement(ret, 0, sig_str.get()); - env->SetObjectArrayElement(ret, 1, gen_str.get()); + auto callback = [&](jint i) { + if (i == 0) { + return sig == nullptr ? nullptr : env->NewStringUTF(sig); + } else { + return gen == nullptr ? nullptr : env->NewStringUTF(gen); + } + }; + jobjectArray ret = CreateObjectArray(env, 2, "java/lang/String", callback); // Need to deallocate the strings. if (sig != nullptr) { diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt index 77791a4efe..dc6e67d194 100644 --- a/test/913-heaps/expected.txt +++ b/test/913-heaps/expected.txt @@ -1,2 +1,98 @@ --- true true +root@root --(stack-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 3000@0 [size=132, length=-1] +root@root --(thread)--> 3000@0 [size=132, length=-1] +1@1000 --(class)--> 1000@0 [size=123, length=-1] +1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@12)--> 3@1001 [size=24, length=-1] +0@0 --(array-element@0)--> 1@1000 [size=16, length=-1] +2@1000 --(class)--> 1000@0 [size=123, length=-1] +3@1001 --(class)--> 1001@0 [size=123, length=-1] +3@1001 --(field@16)--> 4@1000 [size=16, length=-1] +3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +1001@0 --(superclass)--> 1000@0 [size=123, length=-1] +4@1000 --(class)--> 1000@0 [size=123, length=-1] +5@1002 --(class)--> 1002@0 [size=123, length=-1] +5@1002 --(field@24)--> 6@1000 [size=16, length=-1] +5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1002@0 --(superclass)--> 1001@0 [size=123, length=-1] +1002@0 --(interface)--> 2001@0 [size=132, length=-1] +6@1000 --(class)--> 1000@0 [size=123, length=-1] +2001@0 --(interface)--> 2000@0 [size=132, length=-1] +--- +root@root --(stack-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 1@1000 [size=16, length=-1] 
+root@root --(stack-local)--> 2@1000 [size=16, length=-1] +root@root --(stack-local)--> 3000@0 [size=132, length=-1] +root@root --(thread)--> 2@1000 [size=16, length=-1] +root@root --(thread)--> 3000@0 [size=132, length=-1] +2@1000 --(class)--> 1000@0 [size=123, length=-1] +1@1000 --(class)--> 1000@0 [size=123, length=-1] +1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@12)--> 3@1001 [size=24, length=-1] +3@1001 --(class)--> 1001@0 [size=123, length=-1] +3@1001 --(field@16)--> 4@1000 [size=16, length=-1] +3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +0@0 --(array-element@0)--> 1@1000 [size=16, length=-1] +1001@0 --(superclass)--> 1000@0 [size=123, length=-1] +4@1000 --(class)--> 1000@0 [size=123, length=-1] +5@1002 --(class)--> 1002@0 [size=123, length=-1] +5@1002 --(field@24)--> 6@1000 [size=16, length=-1] +5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1002@0 --(superclass)--> 1001@0 [size=123, length=-1] +1002@0 --(interface)--> 2001@0 [size=132, length=-1] +6@1000 --(class)--> 1000@0 [size=123, length=-1] +2001@0 --(interface)--> 2000@0 [size=132, length=-1] +--- +root@root --(jni-global)--> 1@1000 [size=16, length=-1] +root@root --(jni-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 1@1000 [size=16, length=-1] +root@root --(thread)--> 1@1000 [size=16, length=-1] +root@root --(thread)--> 3000@0 [size=132, length=-1] +1@1000 --(class)--> 1000@0 [size=123, length=-1] +1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@12)--> 3@1001 [size=24, length=-1] +2@1000 --(class)--> 1000@0 [size=123, length=-1] +3@1001 --(class)--> 1001@0 [size=123, length=-1] +3@1001 --(field@16)--> 4@1000 [size=16, length=-1] +3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +1001@0 --(superclass)--> 1000@0 [size=123, length=-1] +4@1000 --(class)--> 1000@0 [size=123, length=-1] +5@1002 --(class)--> 1002@0 [size=123, length=-1] +5@1002 --(field@24)--> 6@1000 
[size=16, length=-1] +5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1002@0 --(superclass)--> 1001@0 [size=123, length=-1] +1002@0 --(interface)--> 2001@0 [size=132, length=-1] +6@1000 --(class)--> 1000@0 [size=123, length=-1] +2001@0 --(interface)--> 2000@0 [size=132, length=-1] +--- +root@root --(jni-global)--> 1@1000 [size=16, length=-1] +root@root --(jni-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 1@1000 [size=16, length=-1] +root@root --(stack-local)--> 2@1000 [size=16, length=-1] +root@root --(thread)--> 1@1000 [size=16, length=-1] +root@root --(thread)--> 2@1000 [size=16, length=-1] +root@root --(thread)--> 3000@0 [size=132, length=-1] +1@1000 --(class)--> 1000@0 [size=123, length=-1] +1@1000 --(field@8)--> 2@1000 [size=16, length=-1] +1@1000 --(field@12)--> 3@1001 [size=24, length=-1] +2@1000 --(class)--> 1000@0 [size=123, length=-1] +3@1001 --(class)--> 1001@0 [size=123, length=-1] +3@1001 --(field@16)--> 4@1000 [size=16, length=-1] +3@1001 --(field@20)--> 5@1002 [size=32, length=-1] +1001@0 --(superclass)--> 1000@0 [size=123, length=-1] +4@1000 --(class)--> 1000@0 [size=123, length=-1] +5@1002 --(class)--> 1002@0 [size=123, length=-1] +5@1002 --(field@24)--> 6@1000 [size=16, length=-1] +5@1002 --(field@28)--> 1@1000 [size=16, length=-1] +1002@0 --(superclass)--> 1001@0 [size=123, length=-1] +1002@0 --(interface)--> 2001@0 [size=132, length=-1] +6@1000 --(class)--> 1000@0 [size=123, length=-1] +2001@0 --(interface)--> 2000@0 [size=132, length=-1] +--- diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc index 437779a5f4..d74026cc57 100644 --- a/test/913-heaps/heaps.cc +++ b/test/913-heaps/heaps.cc @@ -16,13 +16,18 @@ #include "heaps.h" +#include <inttypes.h> #include <stdio.h> #include <string.h> +#include <vector> + +#include "base/logging.h" 
#include "base/macros.h" +#include "base/stringprintf.h" #include "jni.h" #include "openjdkjvmti/jvmti.h" - +#include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" namespace art { @@ -38,6 +43,230 @@ extern "C" JNIEXPORT void JNICALL Java_Main_forceGarbageCollection(JNIEnv* env A } } +class IterationConfig { + public: + IterationConfig() {} + virtual ~IterationConfig() {} + + virtual jint Handle(jvmtiHeapReferenceKind reference_kind, + const jvmtiHeapReferenceInfo* reference_info, + jlong class_tag, + jlong referrer_class_tag, + jlong size, + jlong* tag_ptr, + jlong* referrer_tag_ptr, + jint length, + void* user_data) = 0; +}; + +static jint JNICALL HeapReferenceCallback(jvmtiHeapReferenceKind reference_kind, + const jvmtiHeapReferenceInfo* reference_info, + jlong class_tag, + jlong referrer_class_tag, + jlong size, + jlong* tag_ptr, + jlong* referrer_tag_ptr, + jint length, + void* user_data) { + IterationConfig* config = reinterpret_cast<IterationConfig*>(user_data); + return config->Handle(reference_kind, + reference_info, + class_tag, + referrer_class_tag, + size, + tag_ptr, + referrer_tag_ptr, + length, + user_data); +} + +static bool Run(jint heap_filter, + jclass klass_filter, + jobject initial_object, + IterationConfig* config) { + jvmtiHeapCallbacks callbacks; + memset(&callbacks, 0, sizeof(jvmtiHeapCallbacks)); + callbacks.heap_reference_callback = HeapReferenceCallback; + + jvmtiError ret = jvmti_env->FollowReferences(heap_filter, + klass_filter, + initial_object, + &callbacks, + config); + if (ret != JVMTI_ERROR_NONE) { + char* err; + jvmti_env->GetErrorName(ret, &err); + printf("Failure running FollowReferences: %s\n", err); + return false; + } + return true; +} + +extern "C" JNIEXPORT jobjectArray JNICALL Java_Main_followReferences(JNIEnv* env, + jclass klass ATTRIBUTE_UNUSED, + jint heap_filter, + jclass klass_filter, + jobject initial_object, + jint stop_after, + jint follow_set, + jobject jniRef) { + class PrintIterationConfig FINAL 
: public IterationConfig { + public: + PrintIterationConfig(jint _stop_after, jint _follow_set) + : counter_(0), + stop_after_(_stop_after), + follow_set_(_follow_set) { + } + + jint Handle(jvmtiHeapReferenceKind reference_kind, + const jvmtiHeapReferenceInfo* reference_info, + jlong class_tag, + jlong referrer_class_tag, + jlong size, + jlong* tag_ptr, + jlong* referrer_tag_ptr, + jint length, + void* user_data ATTRIBUTE_UNUSED) OVERRIDE { + jlong tag = *tag_ptr; + // Only check tagged objects. + if (tag == 0) { + return JVMTI_VISIT_OBJECTS; + } + + Print(reference_kind, + reference_info, + class_tag, + referrer_class_tag, + size, + tag_ptr, + referrer_tag_ptr, + length); + + counter_++; + if (counter_ == stop_after_) { + return JVMTI_VISIT_ABORT; + } + + if (tag > 0 && tag < 32) { + bool should_visit_references = (follow_set_ & (1 << static_cast<int32_t>(tag))) != 0; + return should_visit_references ? JVMTI_VISIT_OBJECTS : 0; + } + + return JVMTI_VISIT_OBJECTS; + } + + void Print(jvmtiHeapReferenceKind reference_kind, + const jvmtiHeapReferenceInfo* reference_info, + jlong class_tag, + jlong referrer_class_tag, + jlong size, + jlong* tag_ptr, + jlong* referrer_tag_ptr, + jint length) { + std::string referrer_str; + if (referrer_tag_ptr == nullptr) { + referrer_str = "root@root"; + } else { + referrer_str = StringPrintf("%" PRId64 "@%" PRId64, *referrer_tag_ptr, referrer_class_tag); + } + + jlong adapted_size = size; + if (*tag_ptr >= 1000) { + // This is a class or interface, the size of which will be dependent on the architecture. + // Do not print the size, but detect known values and "normalize" for the golden file. 
+ if ((sizeof(void*) == 4 && size == 180) || (sizeof(void*) == 8 && size == 232)) { + adapted_size = 123; + } + } + + lines_.push_back( + StringPrintf("%s --(%s)--> %" PRId64 "@%" PRId64 " [size=%" PRId64 ", length=%d]", + referrer_str.c_str(), + GetReferenceTypeStr(reference_kind, reference_info).c_str(), + *tag_ptr, + class_tag, + adapted_size, + length)); + } + + static std::string GetReferenceTypeStr(jvmtiHeapReferenceKind reference_kind, + const jvmtiHeapReferenceInfo* reference_info) { + switch (reference_kind) { + case JVMTI_HEAP_REFERENCE_CLASS: + return "class"; + case JVMTI_HEAP_REFERENCE_FIELD: + return StringPrintf("field@%d", reference_info->field.index); + case JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT: + return StringPrintf("array-element@%d", reference_info->array.index); + case JVMTI_HEAP_REFERENCE_CLASS_LOADER: + return "classloader"; + case JVMTI_HEAP_REFERENCE_SIGNERS: + return "signers"; + case JVMTI_HEAP_REFERENCE_PROTECTION_DOMAIN: + return "protection-domain"; + case JVMTI_HEAP_REFERENCE_INTERFACE: + return "interface"; + case JVMTI_HEAP_REFERENCE_STATIC_FIELD: + return StringPrintf("static-field@%d", reference_info->field.index); + case JVMTI_HEAP_REFERENCE_CONSTANT_POOL: + return "constant-pool"; + case JVMTI_HEAP_REFERENCE_SUPERCLASS: + return "superclass"; + case JVMTI_HEAP_REFERENCE_JNI_GLOBAL: + return "jni-global"; + case JVMTI_HEAP_REFERENCE_SYSTEM_CLASS: + return "system-class"; + case JVMTI_HEAP_REFERENCE_MONITOR: + return "monitor"; + case JVMTI_HEAP_REFERENCE_STACK_LOCAL: + return "stack-local"; + case JVMTI_HEAP_REFERENCE_JNI_LOCAL: + return "jni-local"; + case JVMTI_HEAP_REFERENCE_THREAD: + return "thread"; + case JVMTI_HEAP_REFERENCE_OTHER: + return "other"; + } + return "unknown"; + } + + const std::vector<std::string>& GetLines() const { + return lines_; + } + + private: + jint counter_; + const jint stop_after_; + const jint follow_set_; + std::vector<std::string> lines_; + }; + + // If jniRef isn't null, add a local and a global 
ref. + ScopedLocalRef<jobject> jni_local_ref(env, nullptr); + jobject jni_global_ref = nullptr; + if (jniRef != nullptr) { + jni_local_ref.reset(env->NewLocalRef(jniRef)); + jni_global_ref = env->NewGlobalRef(jniRef); + } + + PrintIterationConfig config(stop_after, follow_set); + Run(heap_filter, klass_filter, initial_object, &config); + + const std::vector<std::string>& lines = config.GetLines(); + jobjectArray ret = CreateObjectArray(env, + static_cast<jint>(lines.size()), + "java/lang/String", + [&](jint i) { + return env->NewStringUTF(lines[i].c_str()); + }); + + if (jni_global_ref != nullptr) { + env->DeleteGlobalRef(jni_global_ref); + } + + return ret; +} + // Don't do anything jint OnLoad(JavaVM* vm, char* options ATTRIBUTE_UNUSED, diff --git a/test/913-heaps/src/Main.java b/test/913-heaps/src/Main.java index 4d77a482b7..f463429ef4 100644 --- a/test/913-heaps/src/Main.java +++ b/test/913-heaps/src/Main.java @@ -15,12 +15,14 @@ */ import java.util.ArrayList; +import java.util.Collections; public class Main { public static void main(String[] args) throws Exception { System.loadLibrary(args[1]); doTest(); + doFollowReferencesTest(); } public static void doTest() throws Exception { @@ -43,10 +45,161 @@ public class Main { } private static void printStats() { - System.out.println("---"); - int s = getGcStarts(); - int f = getGcFinishes(); - System.out.println((s > 0) + " " + (f > 0)); + System.out.println("---"); + int s = getGcStarts(); + int f = getGcFinishes(); + System.out.println((s > 0) + " " + (f > 0)); + } + + public static void doFollowReferencesTest() throws Exception { + // Force GCs to clean up dirt. + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + + tagClasses(); + setTag(Thread.currentThread(), 3000); + + { + ArrayList<Object> tmpStorage = new ArrayList<>(); + doFollowReferencesTestNonRoot(tmpStorage); + tmpStorage = null; + } + + // Force GCs to clean up dirt. 
+ Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + + doFollowReferencesTestRoot(); + + // Force GCs to clean up dirt. + Runtime.getRuntime().gc(); + Runtime.getRuntime().gc(); + } + + private static void doFollowReferencesTestNonRoot(ArrayList<Object> tmpStorage) { + A a = createTree(); + tmpStorage.add(a); + doFollowReferencesTestImpl(null, Integer.MAX_VALUE, -1, null); + doFollowReferencesTestImpl(a, Integer.MAX_VALUE, -1, null); + tmpStorage.clear(); + } + + private static void doFollowReferencesTestRoot() { + A a = createTree(); + doFollowReferencesTestImpl(null, Integer.MAX_VALUE, -1, a); + doFollowReferencesTestImpl(a, Integer.MAX_VALUE, -1, a); + } + + private static void doFollowReferencesTestImpl(A root, int stopAfter, int followSet, + Object asRoot) { + String[] lines = + followReferences(0, null, root == null ? null : root.foo, stopAfter, followSet, asRoot); + // Note: sort the roots, as stack locals visit order isn't defined, so may depend on compiled + // code. Do not sort non-roots, as the order here needs to be verified (elements are + // finished before a reference is followed). The test setup (and root visit order) + // luckily ensures that this is deterministic. + + int i = 0; + ArrayList<String> rootLines = new ArrayList<>(); + while (i < lines.length) { + if (lines[i].startsWith("root")) { + rootLines.add(lines[i]); + } else { + break; + } + i++; + } + Collections.sort(rootLines); + for (String l : rootLines) { + System.out.println(l); + } + + // Print the non-root lines in order. + while (i < lines.length) { + System.out.println(lines[i]); + i++; + } + + System.out.println("---"); + + // TODO: Test filters. 
+ } + + private static void tagClasses() { + setTag(A.class, 1000); + setTag(B.class, 1001); + setTag(C.class, 1002); + setTag(I1.class, 2000); + setTag(I2.class, 2001); + } + + private static A createTree() { + A root = new A(); + setTag(root, 1); + + A foo = new A(); + setTag(foo, 2); + root.foo = foo; + + B foo2 = new B(); + setTag(foo2, 3); + root.foo2 = foo2; + + A bar = new A(); + setTag(bar, 4); + foo2.bar = bar; + + C bar2 = new C(); + setTag(bar2, 5); + foo2.bar2 = bar2; + + A baz = new A(); + setTag(baz, 6); + bar2.baz = baz; + bar2.baz2 = root; + + return root; + } + + public static class A { + public A foo; + public A foo2; + + public A() {} + public A(A a, A b) { + foo = a; + foo2 = b; + } + } + + public static class B extends A { + public A bar; + public A bar2; + + public B() {} + public B(A a, A b) { + bar = a; + bar2 = b; + } + } + + public static interface I1 { + public final static int i1Field = 1; + } + + public static interface I2 extends I1 { + public final static int i2Field = 2; + } + + public static class C extends B implements I2 { + public A baz; + public A baz2; + + public C() {} + public C(A a, A b) { + baz = a; + baz2 = b; + } } private static native void setupGcCallback(); @@ -54,4 +207,10 @@ public class Main { private static native int getGcStarts(); private static native int getGcFinishes(); private static native void forceGarbageCollection(); + + private static native void setTag(Object o, long tag); + private static native long getTag(Object o); + + private static native String[] followReferences(int heapFilter, Class<?> klassFilter, + Object initialObject, int stopAfter, int followSet, Object jniRef); } diff --git a/test/956-methodhandles/expected.txt b/test/956-methodhandles/expected.txt index ad1c43c490..9ca448ce74 100644 --- a/test/956-methodhandles/expected.txt +++ b/test/956-methodhandles/expected.txt @@ -4,3 +4,4 @@ foo_A foo_B privateRyan_D Received exception: Expected (java.lang.String, java.lang.String)java.lang.String 
but was (java.lang.String, java.lang.Object)void +String constructors done. diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java index 780513f9ed..d0c658f819 100644 --- a/test/956-methodhandles/src/Main.java +++ b/test/956-methodhandles/src/Main.java @@ -19,6 +19,8 @@ import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodHandles.Lookup; import java.lang.invoke.MethodType; import java.lang.invoke.WrongMethodTypeException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.lang.reflect.Constructor; import java.lang.reflect.Field; @@ -65,6 +67,8 @@ public class Main { testfindVirtual(); testUnreflects(); testAsType(); + testConstructors(); + testStringConstructors(); } public static void testfindSpecial_invokeSuperBehaviour() throws Throwable { @@ -347,18 +351,17 @@ public class Main { privateConstructor.setAccessible(true); mh = MethodHandles.lookup().unreflectConstructor(privateConstructor); - // TODO(narayan): Method handle constructor invokes are not supported yet. - // - // UnreflectTester tester = (UnreflectTester) mh.invoke("foo"); - // UnreflectTester tester = (UnreflectTester) mh.invoke("fooExact"); - + instance = (UnreflectTester) mh.invokeExact("abc"); + assertEquals("abc", instance.publicField); + instance = (UnreflectTester) mh.invoke("def"); + assertEquals("def", instance.publicField); Constructor publicConstructor = UnreflectTester.class.getConstructor(String.class, boolean.class); mh = MethodHandles.lookup().unreflectConstructor(publicConstructor); - // TODO(narayan): Method handle constructor invokes are not supported yet. 
- // - // UnreflectTester tester = (UnreflectTester) mh.invoke("foo"); - // UnreflectTester tester = (UnreflectTester) mh.invoke("fooExact"); + instance = (UnreflectTester) mh.invokeExact("abc", false); + assertEquals("abc", instance.publicField); + instance = (UnreflectTester) mh.invoke("def", true); + assertEquals("def", instance.publicField); // TODO(narayan): Non exact invokes for field sets/gets are not implemented yet. // @@ -493,6 +496,195 @@ public class Main { System.out.println("fail"); Thread.dumpStack(); } + + public static void fail(String message) { + System.out.println("fail: " + message); + Thread.dumpStack(); + } + + public static void testConstructors() throws Throwable { + MethodHandle mh = + MethodHandles.lookup().findConstructor(Float.class, + MethodType.methodType(void.class, + float.class)); + Float value = (Float) mh.invokeExact(0.33f); + if (value.floatValue() != 0.33f) { + fail("Unexpected float value from invokeExact " + value.floatValue()); + } + + value = (Float) mh.invoke(3.34f); + if (value.floatValue() != 3.34f) { + fail("Unexpected float value from invoke " + value.floatValue()); + } + + mh = MethodHandles.lookup().findConstructor(Double.class, + MethodType.methodType(void.class, String.class)); + Double d = (Double) mh.invoke("8.45e3"); + if (d.doubleValue() != 8.45e3) { + fail("Unexpected double value from Double(String) " + d.doubleValue()); + } + + mh = MethodHandles.lookup().findConstructor(Double.class, + MethodType.methodType(void.class, double.class)); + d = (Double) mh.invoke(8.45e3); + if (d.doubleValue() != 8.45e3) { + fail("Unexpected double value from Double(double) " + d.doubleValue()); + } + + // Primitive type + try { + mh = MethodHandles.lookup().findConstructor(int.class, MethodType.methodType(void.class)); + fail("Unexpected lookup success for primitive constructor"); + } catch (NoSuchMethodException e) {} + + // Interface + try { + mh = MethodHandles.lookup().findConstructor(Readable.class, + 
MethodType.methodType(void.class)); + fail("Unexpected lookup success for interface constructor"); + } catch (NoSuchMethodException e) {} + + // Abstract + mh = MethodHandles.lookup().findConstructor(Process.class, MethodType.methodType(void.class)); + try { + mh.invoke(); + fail("Unexpected ability to instantiate an abstract class"); + } catch (InstantiationException e) {} + + // Non-existent + try { + MethodHandle bad = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(String.class, Float.class)); + fail("Unexpected success for non-existent constructor"); + } catch (NoSuchMethodException e) {} + + // Non-void constructor search. (I)I instead of (I)V. + try { + MethodHandle foo = MethodHandles.lookup().findConstructor( + Integer.class, MethodType.methodType(Integer.class, Integer.class)); + fail("Unexpected success for non-void type for findConstructor"); + } catch (NoSuchMethodException e) {} + } + + public static void testStringConstructors() throws Throwable { + final String testPattern = "The system as we know it is broken"; + + // String() + MethodHandle mh = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(void.class)); + String s = (String) mh.invokeExact(); + if (!s.equals("")) { + fail("Unexpected empty string constructor result: '" + s + "'"); + } + + // String(String) + mh = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(void.class, String.class)); + s = (String) mh.invokeExact(testPattern); + if (!s.equals(testPattern)) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(char[]) + mh = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(void.class, char[].class)); + s = (String) mh.invokeExact(testPattern.toCharArray()); + if (!s.equals(testPattern)) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(char[], int, int) + mh = MethodHandles.lookup().findConstructor( + String.class, 
MethodType.methodType(void.class, char[].class, int.class, int.class)); + s = (String) mh.invokeExact(new char [] { 'a', 'b', 'c', 'd', 'e'}, 2, 3); + if (!s.equals("cde")) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(int[] codePoints, int offset, int count) + StringBuffer sb = new StringBuffer(testPattern); + int[] codePoints = new int[sb.codePointCount(0, sb.length())]; + for (int i = 0; i < sb.length(); ++i) { + codePoints[i] = sb.codePointAt(i); + } + mh = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(void.class, int[].class, int.class, int.class)); + s = (String) mh.invokeExact(codePoints, 0, codePoints.length); + if (!s.equals(testPattern)) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(byte ascii[], int hibyte, int offset, int count) + byte [] ascii = testPattern.getBytes(StandardCharsets.US_ASCII); + mh = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(void.class, byte[].class, int.class, int.class)); + s = (String) mh.invokeExact(ascii, 0, ascii.length); + if (!s.equals(testPattern)) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(byte bytes[], int offset, int length, String charsetName) + mh = MethodHandles.lookup().findConstructor( + String.class, + MethodType.methodType(void.class, byte[].class, int.class, int.class, String.class)); + s = (String) mh.invokeExact(ascii, 0, 5, StandardCharsets.US_ASCII.name()); + if (!s.equals(testPattern.substring(0, 5))) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(byte bytes[], int offset, int length, Charset charset) + mh = MethodHandles.lookup().findConstructor( + String.class, + MethodType.methodType(void.class, byte[].class, int.class, int.class, Charset.class)); + s = (String) mh.invokeExact(ascii, 0, 5, StandardCharsets.US_ASCII); + if (!s.equals(testPattern.substring(0, 5))) { + fail("Unexpected string constructor 
result: '" + s + "'"); + } + + // String(byte bytes[], String charsetName) + mh = MethodHandles.lookup().findConstructor( + String.class, + MethodType.methodType(void.class, byte[].class, String.class)); + s = (String) mh.invokeExact(ascii, StandardCharsets.US_ASCII.name()); + if (!s.equals(testPattern)) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(byte bytes[], Charset charset) + mh = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(void.class, byte[].class, Charset.class)); + s = (String) mh.invokeExact(ascii, StandardCharsets.US_ASCII); + if (!s.equals(testPattern)) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(byte bytes[], int offset, int length) + mh = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(void.class, byte[].class, int.class, int.class)); + s = (String) mh.invokeExact(ascii, 1, ascii.length - 2); + s = testPattern.charAt(0) + s + testPattern.charAt(testPattern.length() - 1); + if (!s.equals(testPattern)) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(byte bytes[]) + mh = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(void.class, byte[].class)); + s = (String) mh.invokeExact(ascii); + if (!s.equals(testPattern)) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + // String(StringBuffer buffer) + mh = MethodHandles.lookup().findConstructor( + String.class, MethodType.methodType(void.class, StringBuffer.class)); + s = (String) mh.invokeExact(sb); + if (!s.equals(testPattern)) { + fail("Unexpected string constructor result: '" + s + "'"); + } + + System.out.println("String constructors done."); + } } diff --git a/test/957-methodhandle-transforms/src/Main.java b/test/957-methodhandle-transforms/src/Main.java index 6c977f4f34..5806509fc3 100644 --- a/test/957-methodhandle-transforms/src/Main.java +++ 
b/test/957-methodhandle-transforms/src/Main.java @@ -30,6 +30,9 @@ public class Main { testArrayElementSetter(); testIdentity(); testConstant(); + testBindTo(); + testFilterReturnValue(); + testPermuteArguments(); } public static void testThrowException() throws Throwable { @@ -669,6 +672,222 @@ public class Main { } } + public static void testBindTo() throws Throwable { + MethodHandle stringCharAt = MethodHandles.lookup().findVirtual( + String.class, "charAt", MethodType.methodType(char.class, int.class)); + + char value = (char) stringCharAt.invoke("foo", 0); + if (value != 'f') { + System.out.println("Unexpected value: " + value); + } + + MethodHandle bound = stringCharAt.bindTo("foo"); + value = (char) bound.invoke(0); + if (value != 'f') { + System.out.println("Unexpected value: " + value); + } + + try { + stringCharAt.bindTo(new Object()); + fail(); + } catch (ClassCastException expected) { + } + + bound = stringCharAt.bindTo(null); + try { + bound.invoke(0); + fail(); + } catch (NullPointerException expected) { + } + + MethodHandle integerParseInt = MethodHandles.lookup().findStatic( + Integer.class, "parseInt", MethodType.methodType(int.class, String.class)); + + bound = integerParseInt.bindTo("78452"); + int intValue = (int) bound.invoke(); + if (intValue != 78452) { + System.out.println("Unexpected value: " + intValue); + } + } + + public static String filterReturnValue_target(int a) { + return "ReturnValue" + a; + } + + public static boolean filterReturnValue_filter(String value) { + return value.indexOf("42") != -1; + } + + public static int filterReturnValue_intTarget(String a) { + return Integer.parseInt(a); + } + + public static int filterReturnValue_intFilter(int b) { + return b + 1; + } + + public static void filterReturnValue_voidTarget() { + } + + public static int filterReturnValue_voidFilter() { + return 42; + } + + public static void testFilterReturnValue() throws Throwable { + // A target that returns a reference. 
+ { + final MethodHandle target = MethodHandles.lookup().findStatic(Main.class, + "filterReturnValue_target", MethodType.methodType(String.class, int.class)); + final MethodHandle filter = MethodHandles.lookup().findStatic(Main.class, + "filterReturnValue_filter", MethodType.methodType(boolean.class, String.class)); + + MethodHandle adapter = MethodHandles.filterReturnValue(target, filter); + + boolean value = (boolean) adapter.invoke((int) 42); + if (!value) { + System.out.println("Unexpected value: " + value); + } + value = (boolean) adapter.invoke((int) 43); + if (value) { + System.out.println("Unexpected value: " + value); + } + } + + // A target that returns a primitive. + { + final MethodHandle target = MethodHandles.lookup().findStatic(Main.class, + "filterReturnValue_intTarget", MethodType.methodType(int.class, String.class)); + final MethodHandle filter = MethodHandles.lookup().findStatic(Main.class, + "filterReturnValue_intFilter", MethodType.methodType(int.class, int.class)); + + MethodHandle adapter = MethodHandles.filterReturnValue(target, filter); + + int value = (int) adapter.invoke("56"); + if (value != 57) { + System.out.println("Unexpected value: " + value); + } + } + + // A target that returns void. 
+ { + final MethodHandle target = MethodHandles.lookup().findStatic(Main.class, + "filterReturnValue_voidTarget", MethodType.methodType(void.class)); + final MethodHandle filter = MethodHandles.lookup().findStatic(Main.class, + "filterReturnValue_voidFilter", MethodType.methodType(int.class)); + + MethodHandle adapter = MethodHandles.filterReturnValue(target, filter); + + int value = (int) adapter.invoke(); + if (value != 42) { + System.out.println("Unexpected value: " + value); + } + } + } + + public static void permuteArguments_callee(boolean a, byte b, char c, + short d, int e, long f, float g, double h) { + if (a == true && b == (byte) 'b' && c == 'c' && d == (short) 56 && + e == 78 && f == (long) 97 && g == 98.0f && h == 97.0) { + return; + } + + System.out.println("Unexpected arguments: " + a + ", " + b + ", " + c + + ", " + d + ", " + e + ", " + f + ", " + g + ", " + h); + } + + public static void permuteArguments_boxingCallee(boolean a, Integer b) { + if (a && b.intValue() == 42) { + return; + } + + System.out.println("Unexpected arguments: " + a + ", " + b); + } + + public static void testPermuteArguments() throws Throwable { + { + final MethodHandle target = MethodHandles.lookup().findStatic( + Main.class, "permuteArguments_callee", + MethodType.methodType(void.class, new Class<?>[] { + boolean.class, byte.class, char.class, short.class, int.class, + long.class, float.class, double.class })); + + final MethodType newType = MethodType.methodType(void.class, new Class<?>[] { + double.class, float.class, long.class, int.class, short.class, char.class, + byte.class, boolean.class }); + + final MethodHandle permutation = MethodHandles.permuteArguments( + target, newType, new int[] { 7, 6, 5, 4, 3, 2, 1, 0 }); + + permutation.invoke((double) 97.0, (float) 98.0f, (long) 97, 78, + (short) 56, 'c', (byte) 'b', (boolean) true); + + // The permutation array was not of the right length. 
+ try { + MethodHandles.permuteArguments(target, newType, + new int[] { 7 }); + fail(); + } catch (IllegalArgumentException expected) { + } + + // The permutation array has an element that's out of bounds + // (there's no argument with idx == 8). + try { + MethodHandles.permuteArguments(target, newType, + new int[] { 8, 6, 5, 4, 3, 2, 1, 0 }); + fail(); + } catch (IllegalArgumentException expected) { + } + + // The permutation array maps to an incorrect type. + try { + MethodHandles.permuteArguments(target, newType, + new int[] { 7, 7, 5, 4, 3, 2, 1, 0 }); + fail(); + } catch (IllegalArgumentException expected) { + } + } + + // Tests for reference arguments as well as permutations that + // repeat arguments. + { + final MethodHandle target = MethodHandles.lookup().findVirtual( + String.class, "concat", MethodType.methodType(String.class, String.class)); + + final MethodType newType = MethodType.methodType(String.class, String.class, + String.class); + + assertEquals("foobar", (String) target.invoke("foo", "bar")); + + MethodHandle permutation = MethodHandles.permuteArguments(target, + newType, new int[] { 1, 0 }); + assertEquals("barfoo", (String) permutation.invoke("foo", "bar")); + + permutation = MethodHandles.permuteArguments(target, newType, new int[] { 0, 0 }); + assertEquals("foofoo", (String) permutation.invoke("foo", "bar")); + + permutation = MethodHandles.permuteArguments(target, newType, new int[] { 1, 1 }); + assertEquals("barbar", (String) permutation.invoke("foo", "bar")); + } + + // Tests for boxing and unboxing. 
+ { + final MethodHandle target = MethodHandles.lookup().findStatic( + Main.class, "permuteArguments_boxingCallee", + MethodType.methodType(void.class, new Class<?>[] { boolean.class, Integer.class })); + + final MethodType newType = MethodType.methodType(void.class, + new Class<?>[] { Integer.class, boolean.class }); + + MethodHandle permutation = MethodHandles.permuteArguments(target, + newType, new int[] { 1, 0 }); + + permutation.invoke(42, true); + permutation.invoke(42, Boolean.TRUE); + permutation.invoke(Integer.valueOf(42), true); + permutation.invoke(Integer.valueOf(42), Boolean.TRUE); + } + } + public static void fail() { System.out.println("FAIL"); Thread.dumpStack(); @@ -686,5 +905,3 @@ public class Main { throw new AssertionError("assertEquals s1: " + s1 + ", s2: " + s2); } } - - diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 8f8f99832c..29cec9111b 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -230,6 +230,40 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(ART_TEST_RUN_TEST_SKIP), $(ALL_ADDRESS_SIZES)) +# b/31385354: Roots (and thus iteration order) is non-stable between different run modes. +# Temporarily disable test for everything but default optimizing configuration +# until the test check code is generalized to allow spec-compliant output. 
+TEST_ART_BROKEN_B31385354_TESTS := \ + 913-heaps \ + +NON_AOT_MODES := $(filter-out optimizing,$(COMPILER_TYPES)) +ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ + $(NON_AOT_MODES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_B31385354_TESTS), \ + $(ALL_ADDRESS_SIZES)) +NON_AOT_MODES := + +NON_PREBUILD_MODES := $(filter-out prebuild,$(PREBUILD_TYPES)) +ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(NON_PREBUILD_MODES), \ + $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_B31385354_TESTS), \ + $(ALL_ADDRESS_SIZES)) +NON_PREBUILD_MODES := + +NON_RELOCATE_MODES := $(filter-out relocate,$(RELOCATE_TYPES)) +ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ + $(COMPILER_TYPES), $(NON_RELOCATE_MODES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_B31385354_TESTS), \ + $(ALL_ADDRESS_SIZES)) +NON_RELOCATE_MODES := + +ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ + $(COMPILER_TYPES), $(RELOCATE_TYPES),trace,$(GC_TYPES),$(JNI_TYPES), \ + $(IMAGE_TYPES),$(PICTEST_TYPES),$(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_B31385354_TESTS), \ + $(ALL_ADDRESS_SIZES)) + +TEST_ART_BROKEN_B31385354_TESTS := + # Disable 149-suspend-all-stress, its output is flaky (b/28988206). # Disable 577-profile-foreign-dex (b/27454772). 
diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h new file mode 100644 index 0000000000..9aeb98c6e1 --- /dev/null +++ b/test/ti-agent/common_helper.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_TEST_TI_AGENT_COMMON_HELPER_H_ +#define ART_TEST_TI_AGENT_COMMON_HELPER_H_ + +#include "jni.h" +#include "ScopedLocalRef.h" + +namespace art { + +template <typename T> +static jobjectArray CreateObjectArray(JNIEnv* env, + jint length, + const char* component_type_descriptor, + T src) { + if (length < 0) { + return nullptr; + } + + ScopedLocalRef<jclass> obj_class(env, env->FindClass(component_type_descriptor)); + if (obj_class.get() == nullptr) { + return nullptr; + } + + ScopedLocalRef<jobjectArray> ret(env, env->NewObjectArray(length, obj_class.get(), nullptr)); + if (ret.get() == nullptr) { + return nullptr; + } + + for (jint i = 0; i < length; ++i) { + jobject element = src(i); + env->SetObjectArrayElement(ret.get(), static_cast<jint>(i), element); + env->DeleteLocalRef(element); + if (env->ExceptionCheck()) { + return nullptr; + } + } + + return ret.release(); +} + +} // namespace art + +#endif // ART_TEST_TI_AGENT_COMMON_HELPER_H_ diff --git a/tools/cpp-define-generator/main.cc b/tools/cpp-define-generator/main.cc index a1b463a92d..fc99f8abc7 100644 --- a/tools/cpp-define-generator/main.cc +++ b/tools/cpp-define-generator/main.cc @@ 
-59,12 +59,12 @@ pretty_format(T value) { } template <typename T> -void cpp_define(std::string name, T value) { +void cpp_define(const std::string& name, T value) { std::cout << "#define " << name << " " << pretty_format(value) << std::endl; } template <typename T> -void emit_check_eq(T value, std::string expr) { +void emit_check_eq(T value, const std::string& expr) { std::cout << "DEFINE_CHECK_EQ(" << value << ", (" << expr << "))" << std::endl; } |