-rw-r--r--  cmdline/cmdline_types.h | 2
-rw-r--r--  compiler/jit/jit_compiler.cc | 125
-rw-r--r--  compiler/jit/jit_compiler.h | 8
-rw-r--r--  compiler/optimizing/inliner.cc | 1
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 1
-rw-r--r--  compiler/optimizing/nodes.cc | 1
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 1
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h | 3
-rw-r--r--  compiler/optimizing/ssa_builder.cc | 1
-rw-r--r--  libdexfile/dex/class_iterator.h | 2
-rw-r--r--  libdexfile/dex/type_lookup_table.h | 3
-rw-r--r--  openjdkjvmti/ti_class_definition.cc | 1
-rw-r--r--  openjdkjvmti/ti_logging.h | 5
-rw-r--r--  openjdkjvmti/ti_stack.cc | 90
-rw-r--r--  openjdkjvmti/transform.cc | 1
-rw-r--r--  runtime/art_method-inl.h | 26
-rw-r--r--  runtime/art_method.h | 30
-rw-r--r--  runtime/class_linker.cc | 12
-rw-r--r--  runtime/debugger.cc | 530
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 8
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 2
-rw-r--r--  runtime/gc/allocation_record.cc | 47
-rw-r--r--  runtime/gc/space/region_space.cc | 1
-rw-r--r--  runtime/gc/verification.cc | 1
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/instrumentation.cc | 121
-rw-r--r--  runtime/interpreter/interpreter_cache.h | 1
-rw-r--r--  runtime/interpreter/mterp/arm/arithmetic.S | 60
-rw-r--r--  runtime/interpreter/mterp/arm/array.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm/control_flow.S | 2
-rw-r--r--  runtime/interpreter/mterp/arm/floating_point.S | 46
-rw-r--r--  runtime/interpreter/mterp/arm/main.S | 19
-rw-r--r--  runtime/interpreter/mterp/arm/object.S | 4
-rw-r--r--  runtime/interpreter/mterp/arm/other.S | 22
-rw-r--r--  runtime/interpreter/mterp/arm64/floating_point.S | 89
-rw-r--r--  runtime/interpreter/mterp/arm64/main.S | 15
-rw-r--r--  runtime/interpreter/mterp/x86/arithmetic.S | 52
-rw-r--r--  runtime/interpreter/mterp/x86/floating_point.S | 10
-rw-r--r--  runtime/interpreter/mterp/x86/main.S | 13
-rw-r--r--  runtime/interpreter/mterp/x86_64/arithmetic.S | 54
-rw-r--r--  runtime/interpreter/mterp/x86_64/floating_point.S | 14
-rw-r--r--  runtime/interpreter/mterp/x86_64/main.S | 13
-rw-r--r--  runtime/jdwp_provider.h | 2
-rw-r--r--  runtime/jit/jit.cc | 87
-rw-r--r--  runtime/jit/jit.h | 23
-rw-r--r--  runtime/jit/jit_code_cache.cc | 304
-rw-r--r--  runtime/jit/jit_code_cache.h | 36
-rw-r--r--  runtime/jit/profile_saver.cc | 2
-rw-r--r--  runtime/monitor.cc | 71
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 15
-rw-r--r--  runtime/parsed_options.cc | 19
-rw-r--r--  runtime/quick_exception_handler.cc | 159
-rw-r--r--  runtime/runtime.cc | 144
-rw-r--r--  runtime/runtime.h | 8
-rw-r--r--  runtime/runtime_options.h | 1
-rw-r--r--  runtime/stack.h | 30
-rw-r--r--  runtime/thread.cc | 52
-rw-r--r--  runtime/ti/agent.cc | 1
-rw-r--r--  runtime/ti/agent.h | 3
-rw-r--r--  runtime/trace.cc | 42
-rw-r--r--  test/021-string2/src/Main.java | 7
-rw-r--r--  test/137-cfi/cfi.cc | 1
-rw-r--r--  test/461-get-reference-vreg/get_reference_vreg_jni.cc | 86
-rw-r--r--  test/543-env-long-ref/env_long_ref.cc | 54
-rw-r--r--  test/570-checker-osr/osr.cc | 167
-rw-r--r--  test/common/stack_inspect.cc | 105
-rw-r--r--  tools/hiddenapi/hiddenapi.cc | 2
67 files changed, 1351 insertions(+), 1513 deletions(-)
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 48da7551a7..1725154735 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -24,11 +24,11 @@
#include "detail/cmdline_debug_detail.h"
#include "memory_representation.h"
+#include "android-base/logging.h"
#include "android-base/strings.h"
// Includes for the types that are being specialized
#include <string>
-#include "base/logging.h"
#include "base/time_utils.h"
#include "experimental_flags.h"
#include "gc/collector_type.h"
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index bb35065921..0eab8356e7 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -26,7 +26,6 @@
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
-#include "base/unix_file/fd_file.h"
#include "debug/elf_debug_writer.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -34,11 +33,6 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_logger.h"
-#include "oat_file-inl.h"
-#include "oat_quick_method_header.h"
-#include "object_lock.h"
-#include "optimizing/register_allocator.h"
-#include "thread_list.h"
namespace art {
namespace jit {
@@ -47,46 +41,7 @@ JitCompiler* JitCompiler::Create() {
return new JitCompiler();
}
-extern "C" void* jit_load(bool* generate_debug_info) {
- VLOG(jit) << "loading jit compiler";
- auto* const jit_compiler = JitCompiler::Create();
- CHECK(jit_compiler != nullptr);
- *generate_debug_info = jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
- VLOG(jit) << "Done loading jit compiler";
- return jit_compiler;
-}
-
-extern "C" void jit_unload(void* handle) {
- DCHECK(handle != nullptr);
- delete reinterpret_cast<JitCompiler*>(handle);
-}
-
-extern "C" bool jit_compile_method(
- void* handle, ArtMethod* method, Thread* self, bool osr)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
- DCHECK(jit_compiler != nullptr);
- return jit_compiler->CompileMethod(self, method, osr);
-}
-
-extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
- DCHECK(jit_compiler != nullptr);
- const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
- if (compiler_options.GetGenerateDebugInfo()) {
- const ArrayRef<mirror::Class*> types_array(types, count);
- std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
- kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
- MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
- // We never free debug info for types, so we don't need to provide a handle
- // (which would have been otherwise used as identifier to remove it later).
- AddNativeDebugInfoForJit(nullptr /* handle */, elf_file);
- }
-}
-
-JitCompiler::JitCompiler() {
- compiler_options_.reset(new CompilerOptions());
+void JitCompiler::ParseCompilerOptions() {
// Special case max code units for inlining, whose default is "unset" (implicitly
// meaning no limit). Do this before parsing the actual passed options.
compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
@@ -94,8 +49,8 @@ JitCompiler::JitCompiler() {
{
std::string error_msg;
if (!compiler_options_->ParseCompilerOptions(runtime->GetCompilerOptions(),
- /*ignore_unrecognized=*/ true,
- &error_msg)) {
+ /*ignore_unrecognized=*/ true,
+ &error_msg)) {
LOG(FATAL) << error_msg;
UNREACHABLE();
}
@@ -103,8 +58,11 @@ JitCompiler::JitCompiler() {
// JIT is never PIC, no matter what the runtime compiler options specify.
compiler_options_->SetNonPic();
- // Set debuggability based on the runtime value.
- compiler_options_->SetDebuggable(runtime->IsJavaDebuggable());
+ // If the options don't provide whether we generate debuggable code, set
+ // debuggability based on the runtime value.
+ if (!compiler_options_->GetDebuggable()) {
+ compiler_options_->SetDebuggable(runtime->IsJavaDebuggable());
+ }
const InstructionSet instruction_set = compiler_options_->GetInstructionSet();
if (kRuntimeISA == InstructionSet::kArm) {
@@ -148,6 +106,65 @@ JitCompiler::JitCompiler() {
compiler_options_->compiling_with_core_image_ =
CompilerDriver::IsCoreImageFilename(runtime->GetImageLocation());
+ if (compiler_options_->GetGenerateDebugInfo()) {
+ jit_logger_.reset(new JitLogger());
+ jit_logger_->OpenLog();
+ }
+}
+
+extern "C" void* jit_load() {
+ VLOG(jit) << "Create jit compiler";
+ auto* const jit_compiler = JitCompiler::Create();
+ CHECK(jit_compiler != nullptr);
+ VLOG(jit) << "Done creating jit compiler";
+ return jit_compiler;
+}
+
+extern "C" void jit_unload(void* handle) {
+ DCHECK(handle != nullptr);
+ delete reinterpret_cast<JitCompiler*>(handle);
+}
+
+extern "C" bool jit_compile_method(
+ void* handle, ArtMethod* method, Thread* self, bool osr)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ return jit_compiler->CompileMethod(self, method, osr);
+}
+
+extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
+ if (compiler_options.GetGenerateDebugInfo()) {
+ const ArrayRef<mirror::Class*> types_array(types, count);
+ std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
+ kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
+ // We never free debug info for types, so we don't need to provide a handle
+ // (which would have been otherwise used as identifier to remove it later).
+ AddNativeDebugInfoForJit(nullptr /* handle */, elf_file);
+ }
+}
+
+extern "C" void jit_update_options(void* handle) {
+ JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ jit_compiler->ParseCompilerOptions();
+}
+
+extern "C" bool jit_generate_debug_info(void* handle) {
+ JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ return jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
+}
+
+JitCompiler::JitCompiler() {
+ compiler_options_.reset(new CompilerOptions());
+ ParseCompilerOptions();
+
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(),
/* verification_results */ nullptr,
@@ -157,14 +174,6 @@ JitCompiler::JitCompiler() {
/* swap_fd */ -1));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
-
- size_t thread_count = compiler_driver_->GetThreadCount();
- if (compiler_options_->GetGenerateDebugInfo()) {
- DCHECK_EQ(thread_count, 1u)
- << "Generating debug info only works with one compiler thread";
- jit_logger_.reset(new JitLogger());
- jit_logger_->OpenLog();
- }
}
JitCompiler::~JitCompiler() {
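
The reworked entry points above change how the runtime talks to the JIT library: jit_load() no longer reports debug-info generation through an out-parameter; instead the runtime can query jit_generate_debug_info() whenever it wants and ask the compiler to re-read its options via jit_update_options() (for example after the process becomes debuggable). The sketch below shows how a host for this plugin-style interface could bind the symbols with plain dlfcn; the library name, error handling, and main() driver are illustrative assumptions, only the symbol names come from the patch.

    #include <dlfcn.h>
    #include <cstdio>

    // Signatures of the extern "C" entry points defined in jit_compiler.cc above.
    using jit_load_fn = void* (*)();
    using jit_unload_fn = void (*)(void*);
    using jit_update_options_fn = void (*)(void*);
    using jit_generate_debug_info_fn = bool (*)(void*);

    int main() {
      // Assumed library name; the runtime's actual plugin path may differ.
      void* lib = dlopen("libart-compiler.so", RTLD_NOW);
      if (lib == nullptr) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
      }
      auto load = reinterpret_cast<jit_load_fn>(dlsym(lib, "jit_load"));
      auto unload = reinterpret_cast<jit_unload_fn>(dlsym(lib, "jit_unload"));
      auto update_options =
          reinterpret_cast<jit_update_options_fn>(dlsym(lib, "jit_update_options"));
      auto generate_debug_info =
          reinterpret_cast<jit_generate_debug_info_fn>(dlsym(lib, "jit_generate_debug_info"));
      if (load == nullptr || unload == nullptr || update_options == nullptr ||
          generate_debug_info == nullptr) {
        std::fprintf(stderr, "missing JIT entry point\n");
        return 1;
      }
      void* handle = load();    // Creates and configures the JitCompiler.
      update_options(handle);   // Re-parse options, e.g. after becoming debuggable.
      std::printf("generate debug info: %d\n", generate_debug_info(handle) ? 1 : 0);
      unload(handle);
      dlclose(lib);
      return 0;
    }
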
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 5840fece2e..d201611d79 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -43,10 +43,13 @@ class JitCompiler {
const CompilerOptions& GetCompilerOptions() const {
return *compiler_options_.get();
}
+
CompilerDriver* GetCompilerDriver() const {
return compiler_driver_.get();
}
+ void ParseCompilerOptions();
+
private:
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<CompilerDriver> compiler_driver_;
@@ -54,11 +57,6 @@ class JitCompiler {
JitCompiler();
- // This is in the compiler since the runtime doesn't have access to the compiled method
- // structures.
- bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 45d91675fd..ec9322270a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -18,6 +18,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
+#include "base/logging.h"
#include "builder.h"
#include "class_linker.h"
#include "class_root.h"
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e9b5b5a93d..448fed9435 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -19,6 +19,7 @@
#include "art_method-inl.h"
#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
+#include "base/logging.h"
#include "block_builder.h"
#include "class_linker-inl.h"
#include "code_generator.h"
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index aad06b91b6..3f225f354c 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -20,6 +20,7 @@
#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/bit_vector-inl.h"
+#include "base/logging.h"
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "class_root.h"
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1db20fcfeb..1d3fcf3002 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -26,6 +26,7 @@
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/scoped_arena_allocator.h"
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 1f4f6d56be..ddd57f5f1a 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -22,9 +22,10 @@
#include <string>
#include <type_traits>
+#include <android-base/logging.h>
+
#include "base/atomic.h"
#include "base/globals.h"
-#include "base/logging.h" // For VLOG_IS_ON.
namespace art {
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 16c23c8df5..cef234a399 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -18,6 +18,7 @@
#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
+#include "base/logging.h"
#include "data_type-inl.h"
#include "dex/bytecode_utils.h"
#include "mirror/class-inl.h"
diff --git a/libdexfile/dex/class_iterator.h b/libdexfile/dex/class_iterator.h
index 477c93b508..8ed585b0b1 100644
--- a/libdexfile/dex/class_iterator.h
+++ b/libdexfile/dex/class_iterator.h
@@ -17,7 +17,7 @@
#ifndef ART_LIBDEXFILE_DEX_CLASS_ITERATOR_H_
#define ART_LIBDEXFILE_DEX_CLASS_ITERATOR_H_
-#include "base/logging.h"
+#include <android-base/logging.h>
namespace art {
diff --git a/libdexfile/dex/type_lookup_table.h b/libdexfile/dex/type_lookup_table.h
index 7005d34b88..5f002d1d72 100644
--- a/libdexfile/dex/type_lookup_table.h
+++ b/libdexfile/dex/type_lookup_table.h
@@ -17,7 +17,8 @@
#ifndef ART_LIBDEXFILE_DEX_TYPE_LOOKUP_TABLE_H_
#define ART_LIBDEXFILE_DEX_TYPE_LOOKUP_TABLE_H_
-#include "base/logging.h"
+#include <android-base/logging.h>
+
#include "dex/dex_file_types.h"
namespace art {
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 2a565127f6..795a68a189 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -32,6 +32,7 @@
#include "ti_class_definition.h"
#include "base/array_slice.h"
+#include "base/logging.h"
#include "class_linker-inl.h"
#include "class_root.h"
#include "dex/dex_file.h"
diff --git a/openjdkjvmti/ti_logging.h b/openjdkjvmti/ti_logging.h
index 31b51bb126..a1be090b5d 100644
--- a/openjdkjvmti/ti_logging.h
+++ b/openjdkjvmti/ti_logging.h
@@ -37,8 +37,9 @@
#include <ostream>
#include <sstream>
-#include "base/logging.h"
-#include "base/macros.h"
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+
#include "base/mutex.h"
#include "thread-current-inl.h"
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 5de4a81f5e..4a3eac8a15 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -673,34 +673,24 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
return ERR(NONE);
}
-// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
-// runtime methods and transitions must not be counted.
-struct GetFrameCountVisitor : public art::StackVisitor {
- explicit GetFrameCountVisitor(art::Thread* thread)
- : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- count(0) {}
-
- bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
- art::ArtMethod* m = GetMethod();
- const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
- if (do_count) {
- count++;
- }
- return true;
- }
-
- size_t count;
-};
-
struct GetFrameCountClosure : public art::Closure {
public:
GetFrameCountClosure() : count(0) {}
void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
- GetFrameCountVisitor visitor(self);
- visitor.WalkStack(false);
-
- count = visitor.count;
+ // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
+ // counted.
+ art::StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = stack_visitor->GetMethod();
+ if (m != nullptr && !m->IsRuntimeMethod()) {
+ count++;
+ }
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
}
size_t count;
@@ -743,46 +733,30 @@ jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NONE);
}
-// Walks up the stack 'n' callers, when used with Thread::WalkStack.
-struct GetLocationVisitor : public art::StackVisitor {
- GetLocationVisitor(art::Thread* thread, size_t n_in)
- : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- n(n_in),
- count(0),
- caller(nullptr),
- caller_dex_pc(0) {}
-
- bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
- art::ArtMethod* m = GetMethod();
- const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
- if (do_count) {
- DCHECK(caller == nullptr);
- if (count == n) {
- caller = m;
- caller_dex_pc = GetDexPc(false);
- return false;
- }
- count++;
- }
- return true;
- }
-
- const size_t n;
- size_t count;
- art::ArtMethod* caller;
- uint32_t caller_dex_pc;
-};
-
struct GetLocationClosure : public art::Closure {
public:
explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
- GetLocationVisitor visitor(self, n);
- visitor.WalkStack(false);
-
- method = visitor.caller;
- dex_pc = visitor.caller_dex_pc;
+ // Walks up the stack 'n' callers.
+ size_t count = 0u;
+ art::StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = stack_visitor->GetMethod();
+ if (m != nullptr && !m->IsRuntimeMethod()) {
+ DCHECK(method == nullptr);
+ if (count == n) {
+ method = m;
+ dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
+ return false;
+ }
+ count++;
+ }
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
}
const size_t n;
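
Both closures above use the same shape: a static StackVisitor::WalkStack that takes any callable returning bool, where returning false stops the walk, replacing a one-off StackVisitor subclass per query. A self-contained sketch of that shape, with a toy frame list standing in for the real stack (all names here are illustrative, not ART API):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Frame {
      std::string method;
      bool is_runtime;  // Stand-in for ArtMethod::IsRuntimeMethod().
    };

    // Walks frames until the visitor returns false, mirroring the WalkStack contract used above.
    template <typename Visitor>
    void WalkStack(const std::vector<Frame>& frames, Visitor&& visit) {
      for (const Frame& frame : frames) {
        if (!visit(frame)) {
          break;
        }
      }
    }

    int main() {
      std::vector<Frame> frames = {
          {"art_quick_invoke_stub", /*is_runtime=*/ true},
          {"MyClass.run", /*is_runtime=*/ false},
          {"MyClass.main", /*is_runtime=*/ false},
      };
      // Count Java frames while skipping runtime methods, like GetFrameCountClosure above.
      size_t count = 0;
      WalkStack(frames, [&count](const Frame& frame) {
        if (!frame.is_runtime) {
          ++count;
        }
        return true;  // Keep walking.
      });
      std::cout << "java frames: " << count << "\n";
      return 0;
    }
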
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index 8bd10dafcb..27f04b795a 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -40,6 +40,7 @@
#include "art_method.h"
#include "base/array_ref.h"
#include "base/globals.h"
+#include "base/logging.h"
#include "base/mem_map.h"
#include "class_linker.h"
#include "dex/dex_file.h"
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index f2541160ff..c240017900 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -31,6 +31,7 @@
#include "dex/invoke_type.h"
#include "dex/primitive.h"
#include "gc_root-inl.h"
+#include "imtable-inl.h"
#include "intrinsics_enum.h"
#include "jit/profiling_info.h"
#include "mirror/class-inl.h"
@@ -421,6 +422,31 @@ inline CodeItemDebugInfoAccessor ArtMethod::DexInstructionDebugInfo() {
return CodeItemDebugInfoAccessor(*GetDexFile(), GetCodeItem(), GetDexMethodIndex());
}
+inline void ArtMethod::SetCounter(int16_t hotness_count) {
+ DCHECK(!IsAbstract()) << PrettyMethod();
+ hotness_count_ = hotness_count;
+}
+
+inline uint16_t ArtMethod::GetCounter() {
+ DCHECK(!IsAbstract()) << PrettyMethod();
+ return hotness_count_;
+}
+
+inline uint32_t ArtMethod::GetImtIndex() {
+ if (LIKELY(IsAbstract() && imt_index_ != 0)) {
+ uint16_t imt_index = ~imt_index_;
+ DCHECK_EQ(imt_index, ImTable::GetImtIndex(this)) << PrettyMethod();
+ return imt_index;
+ } else {
+ return ImTable::GetImtIndex(this);
+ }
+}
+
+inline void ArtMethod::CalculateAndSetImtIndex() {
+ DCHECK(IsAbstract()) << PrettyMethod();
+ imt_index_ = ~ImTable::GetImtIndex(this);
+}
+
} // namespace art
#endif // ART_RUNTIME_ART_METHOD_INL_H_
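
The caching scheme above works because the cached value shares a 16-bit slot with hotness_count_ (see the union in art_method.h below) and stores the bitwise-negated index, so a legitimate IMT index of 0 is still distinguishable from "not cached" (imt_index_ == 0). A standalone check of that round-trip property, plain C++ with no ART dependencies:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    int main() {
      // Store the way CalculateAndSetImtIndex does (negated, truncated to 16 bits),
      // read back the way GetImtIndex does (negated again).
      for (uint32_t index = 0; index < 64; ++index) {  // Real IMT tables have a few dozen slots.
        uint16_t stored = static_cast<uint16_t>(~index);
        assert(stored != 0);  // Even index 0 stores as 0xFFFF, so 0 can mean "not cached".
        uint16_t restored = static_cast<uint16_t>(~stored);
        assert(restored == index);
      }
      std::cout << "negated IMT index round-trips and never collides with 0\n";
      return 0;
    }
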
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 5bbee92c14..cc214f7ca3 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -650,24 +650,13 @@ class ArtMethod final {
void CopyFrom(ArtMethod* src, PointerSize image_pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Note, hotness_counter_ updates are non-atomic but it doesn't need to be precise. Also,
- // given that the counter is only 16 bits wide we can expect wrap-around in some
- // situations. Consumers of hotness_count_ must be able to deal with that.
- uint16_t IncrementCounter() {
- return ++hotness_count_;
- }
+ ALWAYS_INLINE void SetCounter(int16_t hotness_count) REQUIRES_SHARED(Locks::mutator_lock_);
- void ClearCounter() {
- hotness_count_ = 0;
- }
+ ALWAYS_INLINE uint16_t GetCounter() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetCounter(int16_t hotness_count) {
- hotness_count_ = hotness_count;
- }
+ ALWAYS_INLINE uint32_t GetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetCounter() const {
- return hotness_count_;
- }
+ void CalculateAndSetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
static constexpr MemberOffset HotnessCountOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, hotness_count_));
@@ -772,9 +761,14 @@ class ArtMethod final {
// ifTable.
uint16_t method_index_;
- // The hotness we measure for this method. Not atomic, as we allow
- // missing increments: if the method is hot, we will see it eventually.
- uint16_t hotness_count_;
+ union {
+ // Non-abstract methods: The hotness we measure for this method. Not atomic,
+ // as we allow missing increments: if the method is hot, we will see it eventually.
+ uint16_t hotness_count_;
+ // Abstract methods: IMT index (bitwise negated) or zero if it was not cached.
+ // The negation is needed to distinguish zero index and missing cached entry.
+ uint16_t imt_index_;
+ };
// Fake padding field gets inserted here.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 545754f662..991faa27d3 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3251,7 +3251,7 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void*
return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
}
- if (runtime->IsNativeDebuggableZygoteOK()) {
+ if (runtime->IsNativeDebuggable()) {
DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
// since we want to JIT it (at first use) with extra stackmaps for native
@@ -3634,6 +3634,10 @@ void ClassLinker::LoadMethod(const DexFile& dex_file,
dex_file, dst->GetClassDef(), dex_method_idx);
}
dst->SetAccessFlags(access_flags);
+ // Must be done after SetAccessFlags since IsAbstract depends on it.
+ if (klass->IsInterface() && dst->IsAbstract()) {
+ dst->CalculateAndSetImtIndex();
+ }
}
void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
@@ -6723,7 +6727,7 @@ void ClassLinker::FillIMTFromIfTable(ObjPtr<mirror::IfTable> if_table,
// or interface methods in the IMT here they will not create extra conflicts since we compare
// names and signatures in SetIMTRef.
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+ const uint32_t imt_index = interface_method->GetImtIndex();
// There are only conflicts if all of the interface methods for an IMT slot don't have
// the same implementation method, keep track of this to avoid creating a conflict table in
@@ -6777,7 +6781,7 @@ void ClassLinker::FillIMTFromIfTable(ObjPtr<mirror::IfTable> if_table,
}
DCHECK(implementation_method != nullptr);
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+ const uint32_t imt_index = interface_method->GetImtIndex();
if (!imt[imt_index]->IsRuntimeMethod() ||
imt[imt_index] == unimplemented_method ||
imt[imt_index] == imt_conflict_method) {
@@ -7703,7 +7707,7 @@ bool ClassLinker::LinkInterfaceMethods(
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+ uint32_t imt_index = interface_method->GetImtIndex();
ArtMethod** imt_ptr = &out_imt[imt_index];
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 3ad7fc92a2..4af97f0f35 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2362,25 +2362,18 @@ void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>*
}
static int GetStackDepth(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
- struct CountStackDepthVisitor : public StackVisitor {
- explicit CountStackDepthVisitor(Thread* thread_in)
- : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- depth(0) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- if (!GetMethod()->IsRuntimeMethod()) {
- ++depth;
- }
- return true;
- }
- size_t depth;
- };
-
- CountStackDepthVisitor visitor(thread);
- visitor.WalkStack();
- return visitor.depth;
+ size_t depth = 0u;
+ StackVisitor::WalkStack(
+ [&depth](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!visitor->GetMethod()->IsRuntimeMethod()) {
+ ++depth;
+ }
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return depth;
}
JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
@@ -2398,47 +2391,10 @@ JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* resul
return JDWP::ERR_NONE;
}
-JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
- size_t frame_count, JDWP::ExpandBuf* buf) {
- class GetFrameVisitor : public StackVisitor {
- public:
- GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
- JDWP::ExpandBuf* buf_in)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- depth_(0),
- start_frame_(start_frame_in),
- frame_count_(frame_count_in),
- buf_(buf_in) {
- expandBufAdd4BE(buf_, frame_count_);
- }
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- if (GetMethod()->IsRuntimeMethod()) {
- return true; // The debugger can't do anything useful with a frame that has no Method*.
- }
- if (depth_ >= start_frame_ + frame_count_) {
- return false;
- }
- if (depth_ >= start_frame_) {
- JDWP::FrameId frame_id(GetFrameId());
- JDWP::JdwpLocation location;
- SetJdwpLocation(&location, GetMethod(), GetDexPc());
- VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
- expandBufAdd8BE(buf_, frame_id);
- expandBufAddLocation(buf_, location);
- }
- ++depth_;
- return true;
- }
-
- private:
- size_t depth_;
- const size_t start_frame_;
- const size_t frame_count_;
- JDWP::ExpandBuf* buf_;
- };
-
+JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id,
+ const size_t start_frame,
+ const size_t frame_count,
+ JDWP::ExpandBuf* buf) {
ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
Thread* thread = DecodeThread(soa, thread_id, &error);
@@ -2448,8 +2404,34 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram
if (!IsSuspendedForDebugger(soa, thread)) {
return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
- GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
- visitor.WalkStack();
+
+ expandBufAdd4BE(buf, frame_count);
+
+ size_t depth = 0u;
+ StackVisitor::WalkStack(
+ [&](StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (visitor->GetMethod()->IsRuntimeMethod()) {
+ return true; // The debugger can't do anything useful with a frame that has no Method*.
+ }
+ if (depth >= start_frame + frame_count) {
+ return false;
+ }
+ if (depth >= start_frame) {
+ JDWP::FrameId frame_id(visitor->GetFrameId());
+ JDWP::JdwpLocation location;
+ SetJdwpLocation(&location, visitor->GetMethod(), visitor->GetDexPc());
+ VLOG(jdwp)
+ << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth, frame_id) << location;
+ expandBufAdd8BE(buf, frame_id);
+ expandBufAddLocation(buf, location);
+ }
+ ++depth;
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+
return JDWP::ERR_NONE;
}
@@ -2530,28 +2512,6 @@ void Dbg::SuspendSelf() {
Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
}
-struct GetThisVisitor : public StackVisitor {
- GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- this_object(nullptr),
- frame_id(frame_id_in) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- if (frame_id != GetFrameId()) {
- return true; // continue
- } else {
- this_object = GetThisObject();
- return false;
- }
- }
-
- mirror::Object* this_object;
- JDWP::FrameId frame_id;
-};
-
JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
JDWP::ObjectId* result) {
ScopedObjectAccessUnchecked soa(Thread::Current());
@@ -2564,48 +2524,50 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame
return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
std::unique_ptr<Context> context(Context::Create());
- GetThisVisitor visitor(thread, context.get(), frame_id);
- visitor.WalkStack();
- *result = gRegistry->Add(visitor.this_object);
+ mirror::Object* this_object = nullptr;
+ StackVisitor::WalkStack(
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (frame_id != stack_visitor->GetFrameId()) {
+ return true; // continue
+ } else {
+ this_object = stack_visitor->GetThisObject();
+ return false;
+ }
+ },
+ thread,
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ *result = gRegistry->Add(this_object);
return JDWP::ERR_NONE;
}
-// Walks the stack until we find the frame with the given FrameId.
-class FindFrameVisitor final : public StackVisitor {
- public:
- FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- frame_id_(frame_id),
- error_(JDWP::ERR_INVALID_FRAMEID) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- if (GetFrameId() != frame_id_) {
- return true; // Not our frame, carry on.
- }
- ArtMethod* m = GetMethod();
- if (m->IsNative()) {
- // We can't read/write local value from/into native method.
- error_ = JDWP::ERR_OPAQUE_FRAME;
- } else {
- // We found our frame.
- error_ = JDWP::ERR_NONE;
- }
- return false;
- }
-
- JDWP::JdwpError GetError() const {
- return error_;
- }
-
- private:
- const JDWP::FrameId frame_id_;
- JDWP::JdwpError error_;
-
- DISALLOW_COPY_AND_ASSIGN(FindFrameVisitor);
-};
+template <typename FrameHandler>
+static JDWP::JdwpError FindAndHandleNonNativeFrame(Thread* thread,
+ JDWP::FrameId frame_id,
+ const FrameHandler& handler)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ JDWP::JdwpError result = JDWP::ERR_INVALID_FRAMEID;
+ std::unique_ptr<Context> context(Context::Create());
+ StackVisitor::WalkStack(
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (stack_visitor->GetFrameId() != frame_id) {
+ return true; // Not our frame, carry on.
+ }
+ ArtMethod* m = stack_visitor->GetMethod();
+ if (m->IsNative()) {
+ // We can't read/write local value from/into native method.
+ result = JDWP::ERR_OPAQUE_FRAME;
+ } else {
+ // We found our frame.
+ result = handler(stack_visitor);
+ }
+ return false;
+ },
+ thread,
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return result;
+}
JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
JDWP::ObjectId thread_id = request->ReadThreadId();
@@ -2620,31 +2582,29 @@ JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pRe
if (!IsSuspendedForDebugger(soa, thread)) {
return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
- // Find the frame with the given frame_id.
- std::unique_ptr<Context> context(Context::Create());
- FindFrameVisitor visitor(thread, context.get(), frame_id);
- visitor.WalkStack();
- if (visitor.GetError() != JDWP::ERR_NONE) {
- return visitor.GetError();
- }
- // Read the values from visitor's context.
- int32_t slot_count = request->ReadSigned32("slot count");
- expandBufAdd4BE(pReply, slot_count); /* "int values" */
- for (int32_t i = 0; i < slot_count; ++i) {
- uint32_t slot = request->ReadUnsigned32("slot");
- JDWP::JdwpTag reqSigByte = request->ReadTag();
+ return FindAndHandleNonNativeFrame(
+ thread,
+ frame_id,
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Read the values from visitor's context.
+ int32_t slot_count = request->ReadSigned32("slot count");
+ expandBufAdd4BE(pReply, slot_count); /* "int values" */
+ for (int32_t i = 0; i < slot_count; ++i) {
+ uint32_t slot = request->ReadUnsigned32("slot");
+ JDWP::JdwpTag reqSigByte = request->ReadTag();
- VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte;
+ VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte;
- size_t width = Dbg::GetTagWidth(reqSigByte);
- uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
- error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
- if (error != JDWP::ERR_NONE) {
- return error;
- }
- }
- return JDWP::ERR_NONE;
+ size_t width = Dbg::GetTagWidth(reqSigByte);
+ uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
+ error = Dbg::GetLocalValue(*stack_visitor, soa, slot, reqSigByte, ptr, width);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ }
+ return JDWP::ERR_NONE;
+ });
}
constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
@@ -2791,29 +2751,27 @@ JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
if (!IsSuspendedForDebugger(soa, thread)) {
return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
- // Find the frame with the given frame_id.
- std::unique_ptr<Context> context(Context::Create());
- FindFrameVisitor visitor(thread, context.get(), frame_id);
- visitor.WalkStack();
- if (visitor.GetError() != JDWP::ERR_NONE) {
- return visitor.GetError();
- }
-
- // Writes the values into visitor's context.
- int32_t slot_count = request->ReadSigned32("slot count");
- for (int32_t i = 0; i < slot_count; ++i) {
- uint32_t slot = request->ReadUnsigned32("slot");
- JDWP::JdwpTag sigByte = request->ReadTag();
- size_t width = Dbg::GetTagWidth(sigByte);
- uint64_t value = request->ReadValue(width);
- VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value;
- error = Dbg::SetLocalValue(thread, visitor, slot, sigByte, value, width);
- if (error != JDWP::ERR_NONE) {
- return error;
- }
- }
- return JDWP::ERR_NONE;
+ return FindAndHandleNonNativeFrame(
+ thread,
+ frame_id,
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Writes the values into visitor's context.
+ int32_t slot_count = request->ReadSigned32("slot count");
+ for (int32_t i = 0; i < slot_count; ++i) {
+ uint32_t slot = request->ReadUnsigned32("slot");
+ JDWP::JdwpTag sigByte = request->ReadTag();
+ size_t width = Dbg::GetTagWidth(sigByte);
+ uint64_t value = request->ReadValue(width);
+
+ VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value;
+ error = Dbg::SetLocalValue(thread, *stack_visitor, slot, sigByte, value, width);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ }
+ return JDWP::ERR_NONE;
+ });
}
template<typename T>
@@ -2985,107 +2943,71 @@ void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
}
-/**
- * Finds the location where this exception will be caught. We search until we reach the top
- * frame, in which case this exception is considered uncaught.
- */
-class CatchLocationFinder : public StackVisitor {
- public:
- CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- exception_(exception),
- handle_scope_(self),
- this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
- catch_method_(nullptr),
- throw_method_(nullptr),
- catch_dex_pc_(dex::kDexNoIndex),
- throw_dex_pc_(dex::kDexNoIndex) {
- }
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* method = GetMethod();
- DCHECK(method != nullptr);
- if (method->IsRuntimeMethod()) {
- // Ignore callee save method.
- DCHECK(method->IsCalleeSaveMethod());
- return true;
- }
-
- uint32_t dex_pc = GetDexPc();
- if (throw_method_ == nullptr) {
- // First Java method found. It is either the method that threw the exception,
- // or the Java native method that is reporting an exception thrown by
- // native code.
- this_at_throw_.Assign(GetThisObject());
- throw_method_ = method;
- throw_dex_pc_ = dex_pc;
- }
-
- if (dex_pc != dex::kDexNoIndex) {
- StackHandleScope<1> hs(GetThread());
- uint32_t found_dex_pc;
- Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
- bool unused_clear_exception;
- found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
- if (found_dex_pc != dex::kDexNoIndex) {
- catch_method_ = method;
- catch_dex_pc_ = found_dex_pc;
- return false; // End stack walk.
- }
- }
- return true; // Continue stack walk.
- }
-
- ArtMethod* GetCatchMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
- return catch_method_;
- }
-
- ArtMethod* GetThrowMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
- return throw_method_;
- }
-
- mirror::Object* GetThisAtThrow() REQUIRES_SHARED(Locks::mutator_lock_) {
- return this_at_throw_.Get();
- }
-
- uint32_t GetCatchDexPc() const {
- return catch_dex_pc_;
- }
-
- uint32_t GetThrowDexPc() const {
- return throw_dex_pc_;
- }
-
- private:
- const Handle<mirror::Throwable>& exception_;
- StackHandleScope<1> handle_scope_;
- MutableHandle<mirror::Object> this_at_throw_;
- ArtMethod* catch_method_;
- ArtMethod* throw_method_;
- uint32_t catch_dex_pc_;
- uint32_t throw_dex_pc_;
-
- DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
-};
-
void Dbg::PostException(mirror::Throwable* exception_object) {
if (!IsDebuggerActive()) {
return;
}
Thread* const self = Thread::Current();
- StackHandleScope<1> handle_scope(self);
+ StackHandleScope<2> handle_scope(self);
Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
+ MutableHandle<mirror::Object> this_at_throw = handle_scope.NewHandle<mirror::Object>(nullptr);
std::unique_ptr<Context> context(Context::Create());
- CatchLocationFinder clf(self, h_exception, context.get());
- clf.WalkStack(/* include_transitions= */ false);
+
+ ArtMethod* catch_method = nullptr;
+ ArtMethod* throw_method = nullptr;
+ uint32_t catch_dex_pc = dex::kDexNoIndex;
+ uint32_t throw_dex_pc = dex::kDexNoIndex;
+ StackVisitor::WalkStack(
+ /**
+ * Finds the location where this exception will be caught. We search until we reach the top
+ * frame, in which case this exception is considered uncaught.
+ */
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* method = stack_visitor->GetMethod();
+ DCHECK(method != nullptr);
+ if (method->IsRuntimeMethod()) {
+ // Ignore callee save method.
+ DCHECK(method->IsCalleeSaveMethod());
+ return true;
+ }
+
+ uint32_t dex_pc = stack_visitor->GetDexPc();
+ if (throw_method == nullptr) {
+ // First Java method found. It is either the method that threw the exception,
+ // or the Java native method that is reporting an exception thrown by
+ // native code.
+ this_at_throw.Assign(stack_visitor->GetThisObject());
+ throw_method = method;
+ throw_dex_pc = dex_pc;
+ }
+
+ if (dex_pc != dex::kDexNoIndex) {
+ StackHandleScope<1> hs(stack_visitor->GetThread());
+ uint32_t found_dex_pc;
+ Handle<mirror::Class> exception_class(hs.NewHandle(h_exception->GetClass()));
+ bool unused_clear_exception;
+ found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
+ if (found_dex_pc != dex::kDexNoIndex) {
+ catch_method = method;
+ catch_dex_pc = found_dex_pc;
+ return false; // End stack walk.
+ }
+ }
+ return true; // Continue stack walk.
+ },
+ self,
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+
JDWP::EventLocation exception_throw_location;
- SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
+ SetEventLocation(&exception_throw_location, throw_method, throw_dex_pc);
JDWP::EventLocation exception_catch_location;
- SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());
+ SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
- gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
- clf.GetThisAtThrow());
+ gJdwpState->PostException(&exception_throw_location,
+ h_exception.Get(),
+ &exception_catch_location,
+ this_at_throw.Get());
}
void Dbg::PostClassPrepare(mirror::Class* c) {
@@ -3649,56 +3571,6 @@ bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
return instrumentation->IsDeoptimized(m);
}
-class NeedsDeoptimizationVisitor : public StackVisitor {
- public:
- explicit NeedsDeoptimizationVisitor(Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- needs_deoptimization_(false) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- // The visitor is meant to be used when handling exception from compiled code only.
- CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: "
- << ArtMethod::PrettyMethod(GetMethod());
- ArtMethod* method = GetMethod();
- if (method == nullptr) {
- // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment)
- // so we can stop the visit.
- DCHECK(!needs_deoptimization_);
- return false;
- }
- if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
- // We found a compiled frame in the stack but instrumentation is set to interpret
- // everything: we need to deoptimize.
- needs_deoptimization_ = true;
- return false;
- }
- if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
- // We found a deoptimized method in the stack.
- needs_deoptimization_ = true;
- return false;
- }
- ShadowFrame* frame = GetThread()->FindDebuggerShadowFrame(GetFrameId());
- if (frame != nullptr) {
- // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
- // deoptimize the stack to execute (and deallocate) this frame.
- needs_deoptimization_ = true;
- return false;
- }
- return true;
- }
-
- bool NeedsDeoptimization() const {
- return needs_deoptimization_;
- }
-
- private:
- // Do we need to deoptimize the stack?
- bool needs_deoptimization_;
-
- DISALLOW_COPY_AND_ASSIGN(NeedsDeoptimizationVisitor);
-};
-
// Do we need to deoptimize the stack to handle an exception?
bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
const SingleStepControl* const ssc = thread->GetSingleStepControl();
@@ -3708,9 +3580,45 @@ bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
}
// Deoptimization is required if at least one method in the stack needs it. However we
// skip frames that will be unwound (thus not executed).
- NeedsDeoptimizationVisitor visitor(thread);
- visitor.WalkStack(true); // includes upcall.
- return visitor.NeedsDeoptimization();
+ bool needs_deoptimization = false;
+ StackVisitor::WalkStack(
+ [&](art::StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // The visitor is meant to be used when handling exception from compiled code only.
+ CHECK(!visitor->IsShadowFrame()) << "We only expect to visit compiled frame: "
+ << ArtMethod::PrettyMethod(visitor->GetMethod());
+ ArtMethod* method = visitor->GetMethod();
+ if (method == nullptr) {
+ // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment)
+ // so we can stop the visit.
+ DCHECK(!needs_deoptimization);
+ return false;
+ }
+ if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
+ // We found a compiled frame in the stack but instrumentation is set to interpret
+ // everything: we need to deoptimize.
+ needs_deoptimization = true;
+ return false;
+ }
+ if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
+ // We found a deoptimized method in the stack.
+ needs_deoptimization = true;
+ return false;
+ }
+ ShadowFrame* frame = visitor->GetThread()->FindDebuggerShadowFrame(visitor->GetFrameId());
+ if (frame != nullptr) {
+ // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
+ // deoptimize the stack to execute (and deallocate) this frame.
+ needs_deoptimization = true;
+ return false;
+ }
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ /* check_suspended */ true,
+ /* include_transitions */ true);
+ return needs_deoptimization;
}
// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
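
FindAndHandleNonNativeFrame factors the frame search out of GetLocalValues and SetLocalValues: the walk locates the requested frame, rejects native frames with ERR_OPAQUE_FRAME, and otherwise delegates to a caller-supplied handler whose JdwpError becomes the result, with ERR_INVALID_FRAMEID returned when the frame id is never seen. A toy version of the same control flow (the Frame struct and error enum are stand-ins, not JDWP types):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum class JdwpError { kNone, kInvalidFrameId, kOpaqueFrame };

    struct Frame {
      uint64_t id;
      bool is_native;
    };

    // Searches for frame_id; the handler only runs for a non-native match.
    template <typename Handler>
    JdwpError FindAndHandleNonNativeFrame(const std::vector<Frame>& frames,
                                          uint64_t frame_id,
                                          const Handler& handler) {
      JdwpError result = JdwpError::kInvalidFrameId;  // Frame id never seen.
      for (const Frame& frame : frames) {
        if (frame.id != frame_id) {
          continue;  // Not our frame, carry on.
        }
        result = frame.is_native ? JdwpError::kOpaqueFrame : handler(frame);
        break;
      }
      return result;
    }

    int main() {
      std::vector<Frame> frames = {{1, /*is_native=*/ true}, {2, /*is_native=*/ false}};
      JdwpError error = FindAndHandleNonNativeFrame(frames, 2, [](const Frame& frame) {
        std::cout << "reading locals of frame " << frame.id << "\n";
        return JdwpError::kNone;
      });
      std::cout << "success: " << (error == JdwpError::kNone) << "\n";
      return 0;
    }
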
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 2236e61d75..a18cca4cb2 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -533,13 +533,7 @@ ALWAYS_INLINE ArtMethod* FindMethodToCall(uint32_t method_idx,
UNREACHABLE();
}
case kInterface: {
- size_t imt_index;
- InterpreterCache* tls_cache = self->GetInterpreterCache();
- if (UNLIKELY(!tls_cache->Get(resolved_method, &imt_index))) {
- imt_index = ImTable::GetImtIndex(resolved_method);
- tls_cache->Set(resolved_method, imt_index);
- }
- DCHECK_EQ(imt_index, ImTable::GetImtIndex(resolved_method));
+ size_t imt_index = resolved_method->GetImtIndex();
PointerSize pointer_size = class_linker->GetImagePointerSize();
ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
ArtMethod* imt_method = klass->GetImt(pointer_size)->Get(imt_index, pointer_size);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 147249000f..b6adcf070d 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2671,7 +2671,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_metho
DCHECK(!interface_method->IsRuntimeMethod());
// Look whether we have a match in the ImtConflictTable.
- uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+ uint32_t imt_index = interface_method->GetImtIndex();
ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
if (LIKELY(conflict_method->IsRuntimeMethod())) {
ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index e11fa5c212..80e3394f86 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -184,34 +184,6 @@ void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
new_record_condition_.Broadcast(Thread::Current());
}
-class AllocRecordStackVisitor : public StackVisitor {
- public:
- AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- max_depth_(max_depth),
- trace_(trace_out) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- if (trace_->GetDepth() >= max_depth_) {
- return false;
- }
- ArtMethod* m = GetMethod();
- // m may be null if we have inlined methods of unresolved classes. b/27858645
- if (m != nullptr && !m->IsRuntimeMethod()) {
- m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
- }
- return true;
- }
-
- private:
- const size_t max_depth_;
- AllocRecordStackTrace* const trace_;
-};
-
void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
Thread* self = Thread::Current();
Heap* heap = Runtime::Current()->GetHeap();
@@ -268,11 +240,26 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self,
// Get stack trace outside of lock in case there are allocations during the stack walk.
// b/27858645.
AllocRecordStackTrace trace;
- AllocRecordStackVisitor visitor(self, max_stack_depth_, /*out*/ &trace);
{
StackHandleScope<1> hs(self);
auto obj_wrapper = hs.NewHandleWrapper(obj);
- visitor.WalkStack();
+
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (trace.GetDepth() >= max_stack_depth_) {
+ return false;
+ }
+ ArtMethod* m = stack_visitor->GetMethod();
+ // m may be null if we have inlined methods of unresolved classes. b/27858645
+ if (m != nullptr && !m->IsRuntimeMethod()) {
+ m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ trace.AddStackElement(AllocRecordStackTraceElement(m, stack_visitor->GetDexPc()));
+ }
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
}
MutexLock mu(self, *Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 2774e26acd..21cae9371f 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -17,6 +17,7 @@
#include "bump_pointer_space-inl.h"
#include "bump_pointer_space.h"
#include "base/dumpable.h"
+#include "base/logging.h"
#include "gc/accounting/read_barrier_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 47c54bd189..8670a22431 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -21,6 +21,7 @@
#include "art_field-inl.h"
#include "base/file_utils.h"
+#include "base/logging.h"
#include "mirror/class-inl.h"
#include "mirror/object-refvisitor-inl.h"
diff --git a/runtime/image.cc b/runtime/image.cc
index 3023cefd66..f50c39c3d5 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '9', '\0' }; // Remove boot oat extents.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '0', '\0' }; // Store ImtIndex.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index cbcaaef260..12f1522386 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -18,6 +18,8 @@
#include <sstream>
+#include <android-base/logging.h>
+
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
@@ -1355,65 +1357,66 @@ DeoptimizationMethodType Instrumentation::GetDeoptimizationMethodType(ArtMethod*
}
// Try to get the shorty of a runtime method if it's an invocation stub.
-struct RuntimeMethodShortyVisitor : public StackVisitor {
- explicit RuntimeMethodShortyVisitor(Thread* thread)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- shorty('V') {}
-
- static uint16_t GetMethodIndexOfInvoke(ArtMethod* caller,
- const Instruction& inst,
- uint32_t dex_pc)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- switch (inst.Opcode()) {
- case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
- case Instruction::INVOKE_VIRTUAL_QUICK: {
- uint16_t method_idx = caller->GetIndexFromQuickening(dex_pc);
- CHECK_NE(method_idx, DexFile::kDexNoIndex16);
- return method_idx;
- }
- default: {
- return inst.VRegB();
- }
- }
- }
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m == nullptr || m->IsRuntimeMethod()) {
- return true;
- }
- // The first Java method.
- if (m->IsNative()) {
- // Use JNI method's shorty for the jni stub.
- shorty = m->GetShorty()[0];
- } else if (m->IsProxyMethod()) {
- // Proxy method just invokes its proxied method via
- // art_quick_proxy_invoke_handler.
- shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
- } else {
- const Instruction& instr = m->DexInstructions().InstructionAt(GetDexPc());
- if (instr.IsInvoke()) {
- uint16_t method_index = GetMethodIndexOfInvoke(m, instr, GetDexPc());
- const DexFile* dex_file = m->GetDexFile();
- if (interpreter::IsStringInit(dex_file, method_index)) {
- // Invoking string init constructor is turned into invoking
- // StringFactory.newStringFromChars() which returns a string.
- shorty = 'L';
- } else {
- shorty = dex_file->GetMethodShorty(method_index)[0];
+static char GetRuntimeMethodShorty(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
+ char shorty = 'V';
+ StackVisitor::WalkStack(
+ [&shorty](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ if (m == nullptr || m->IsRuntimeMethod()) {
+ return true;
}
- } else {
- // It could be that a non-invoke opcode invokes a stub, which in turn
- // invokes Java code. In such cases, we should never expect a return
- // value from the stub.
- }
- }
- // Stop stack walking since we've seen a Java frame.
- return false;
- }
+ // The first Java method.
+ if (m->IsNative()) {
+ // Use JNI method's shorty for the jni stub.
+ shorty = m->GetShorty()[0];
+ } else if (m->IsProxyMethod()) {
+ // Proxy method just invokes its proxied method via
+ // art_quick_proxy_invoke_handler.
+ shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
+ } else {
+ const Instruction& instr = m->DexInstructions().InstructionAt(stack_visitor->GetDexPc());
+ if (instr.IsInvoke()) {
+ auto get_method_index_fn = [](ArtMethod* caller,
+ const Instruction& inst,
+ uint32_t dex_pc)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ switch (inst.Opcode()) {
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+ case Instruction::INVOKE_VIRTUAL_QUICK: {
+ uint16_t method_idx = caller->GetIndexFromQuickening(dex_pc);
+ CHECK_NE(method_idx, DexFile::kDexNoIndex16);
+ return method_idx;
+ }
+ default: {
+ return static_cast<uint16_t>(inst.VRegB());
+ }
+ }
+ };
+
+ uint16_t method_index = get_method_index_fn(m, instr, stack_visitor->GetDexPc());
+ const DexFile* dex_file = m->GetDexFile();
+ if (interpreter::IsStringInit(dex_file, method_index)) {
+ // Invoking string init constructor is turned into invoking
+ // StringFactory.newStringFromChars() which returns a string.
+ shorty = 'L';
+ } else {
+ shorty = dex_file->GetMethodShorty(method_index)[0];
+ }
- char shorty;
-};
+ } else {
+ // It could be that a non-invoke opcode invokes a stub, which in turn
+ // invokes Java code. In such cases, we should never expect a return
+ // value from the stub.
+ }
+ }
+ // Stop stack walking since we've seen a Java frame.
+ return false;
+ },
+ thread,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return shorty;
+}
TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
uintptr_t* return_pc,
@@ -1447,9 +1450,7 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
// for clinit, we need to pass return results to the caller.
// We need the correct shorty to decide whether we need to pass the return
// result for deoptimization below.
- RuntimeMethodShortyVisitor visitor(self);
- visitor.WalkStack();
- return_shorty = visitor.shorty;
+ return_shorty = GetRuntimeMethodShorty(self);
} else {
// Some runtime methods such as allocations, unresolved field getters, etc.
// have return value. We don't need to set return_value since MethodExitEvent()
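The rewritten shorty lookup above follows the pattern this change applies throughout the runtime: stack walks that previously needed a dedicated StackVisitor subclass now pass a lambda to StackVisitor::WalkStack. A minimal sketch of that shape, using the same call signature as above (FindFirstJavaMethod is a hypothetical helper, shown only to illustrate the pattern):

    // Sketch only: find the first non-runtime method on `thread`'s stack.
    ArtMethod* FindFirstJavaMethod(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
      ArtMethod* result = nullptr;
      StackVisitor::WalkStack(
          [&result](const art::StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
            ArtMethod* m = visitor->GetMethod();
            if (m == nullptr || m->IsRuntimeMethod()) {
              return true;   // Keep walking past runtime frames.
            }
            result = m;
            return false;    // First Java frame found; stop the walk.
          },
          thread,
          /* context= */ nullptr,
          art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
      return result;
    }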
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
index 355058f4f6..003ea6c8d3 100644
--- a/runtime/interpreter/interpreter_cache.h
+++ b/runtime/interpreter/interpreter_cache.h
@@ -38,7 +38,6 @@ class Thread;
// iget/iput: The field offset. The field must be non-volatile.
// sget/sput: The ArtField* pointer. The field must be non-volitile.
// invoke: The ArtMethod* pointer (before vtable indirection, etc).
-// ArtMethod*: The ImtIndex of the method.
//
// We ensure consistency of the cache by clearing it
// whenever any dex file is unloaded.
diff --git a/runtime/interpreter/mterp/arm/arithmetic.S b/runtime/interpreter/mterp/arm/arithmetic.S
index 7a373c7e3a..a6ba454882 100644
--- a/runtime/interpreter/mterp/arm/arithmetic.S
+++ b/runtime/interpreter/mterp/arm/arithmetic.S
@@ -157,8 +157,8 @@
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r3 @ r2/r3<- vCC/vCC+1
.if $chkzero
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
@@ -168,7 +168,7 @@
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ SET_VREG_WIDE_BY_ADDR $result0,$result1,r9 @ vAA/vAA+1<- $result0/$result1
GOTO_OPCODE ip @ jump to next instruction
/* 14-17 instructions */
@@ -192,8 +192,8 @@
ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
.if $chkzero
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
@@ -203,7 +203,7 @@
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ SET_VREG_WIDE_BY_ADDR $result0,$result1,r9 @ vAA/vAA+1<- $result0/$result1
GOTO_OPCODE ip @ jump to next instruction
/* 12-15 instructions */
@@ -243,7 +243,7 @@
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vB/vB+1
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ r0<- op, r0-r3 changed
@@ -265,13 +265,13 @@
ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vAA
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ r0/r1<- op, r2-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* 10-11 instructions */
@@ -293,7 +293,7 @@
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vA/vA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* 9-10 instructions */
@@ -345,8 +345,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r3 @ r2/r3<- vCC/vCC+1
cmp r0, r2
sbcs ip, r1, r3 @ Sets correct CCs for checking LT (but not EQ/NE)
mov ip, #0
@@ -541,8 +541,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r3 @ r2/r3<- vCC/vCC+1
mul ip, r2, r1 @ ip<- ZxW
umull r1, lr, r2, r0 @ r1/lr <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
@@ -552,7 +552,7 @@
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2 } @ vAA/vAA+1<- r1/r2
+ SET_VREG_WIDE_BY_ADDR r1, r2, r0 @ vAA/vAA+1<- r1/r2
GOTO_OPCODE ip @ jump to next instruction
%def op_mul_long_2addr():
@@ -569,8 +569,8 @@
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, rINST @ r0/r1<- vAA/vAA+1
mul ip, r2, r1 @ ip<- ZxW
umull r1, lr, r2, r0 @ r1/lr <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
@@ -578,7 +578,7 @@
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
add r2, r2, lr @ r2<- r2 + low(ZxW + (YxX))
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
+ SET_VREG_WIDE_BY_ADDR r1, r2, r0 @ vAA/vAA+1<- r1/r2
GOTO_OPCODE ip @ jump to next instruction
%def op_neg_int():
@@ -781,7 +781,7 @@
mov r0, r0, lsr #8 @ r0<- CC
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r2<- r2 & 0x3f
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
@@ -793,7 +793,7 @@
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r0, r0, asl r2 @ r0<- r0 << r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_shl_long_2addr():
@@ -808,7 +808,7 @@
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
mov r1, r1, asl r2 @ r1<- r1 << r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2))
@@ -817,7 +817,7 @@
movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
mov r0, r0, asl r2 @ r0<- r0 << r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_shr_int():
@@ -843,7 +843,7 @@
mov r0, r0, lsr #8 @ r0<- CC
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r0<- r0 & 0x3f
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
@@ -855,7 +855,7 @@
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r1, r1, asr r2 @ r1<- r1 >> r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_shr_long_2addr():
@@ -870,7 +870,7 @@
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r2 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
@@ -879,7 +879,7 @@
movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
mov r1, r1, asr r2 @ r1<- r1 >> r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_sub_int():
@@ -917,7 +917,7 @@
mov r0, r0, lsr #8 @ r0<- CC
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r0<- r0 & 0x3f
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
@@ -929,7 +929,7 @@
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r1, r1, lsr r2 @ r1<- r1 >>> r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_ushr_long_2addr():
@@ -944,7 +944,7 @@
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r2 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
@@ -953,7 +953,7 @@
movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
mov r1, r1, lsr r2 @ r1<- r1 >>> r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_xor_int():
diff --git a/runtime/interpreter/mterp/arm/array.S b/runtime/interpreter/mterp/arm/array.S
index 88d89c53a2..7b3db6165b 100644
--- a/runtime/interpreter/mterp/arm/array.S
+++ b/runtime/interpreter/mterp/arm/array.S
@@ -87,7 +87,7 @@
ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ SET_VREG_WIDE_BY_ADDR r2, r3, r9 @ vAA/vAA+1<- r2/r3
GOTO_OPCODE ip @ jump to next instruction
%def op_aput(store="str", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
@@ -169,7 +169,7 @@
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r9 @ r2/r3<- vAA/vAA+1
GET_INST_OPCODE ip @ extract opcode from rINST
strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/control_flow.S b/runtime/interpreter/mterp/arm/control_flow.S
index 51832e10b5..2299ef9332 100644
--- a/runtime/interpreter/mterp/arm/control_flow.S
+++ b/runtime/interpreter/mterp/arm/control_flow.S
@@ -189,7 +189,7 @@
blne MterpSuspendCheck @ (self)
mov r2, rINST, lsr #8 @ r2<- AA
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
- ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1 <- vAA/vAA+1
b MterpReturn
%def op_sparse_switch():
diff --git a/runtime/interpreter/mterp/arm/floating_point.S b/runtime/interpreter/mterp/arm/floating_point.S
index 21c386eb6a..9e4d00cb9a 100644
--- a/runtime/interpreter/mterp/arm/floating_point.S
+++ b/runtime/interpreter/mterp/arm/floating_point.S
@@ -13,8 +13,8 @@
and r2, r0, #255 @ r2<- BB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
+ GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vCC
+ GET_VREG_FLOAT_BY_ADDR s0, r2 @ s0<- vBB
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ s2<- op
@@ -35,12 +35,12 @@
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
+ GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vB
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
+ GET_VREG_FLOAT_BY_ADDR s0, r9 @ s0<- vA
$instr @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2 No need to clear as it's 2addr
+ SET_VREG_FLOAT_BY_ADDR s2, r9 @ vAA<- s2 No need to clear as it's 2addr
GOTO_OPCODE ip @ jump to next instruction
%def fbinopWide(instr=""):
@@ -58,14 +58,14 @@
and r2, r0, #255 @ r2<- BB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
+ GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vCC
+ GET_VREG_DOUBLE_BY_ADDR d0, r2 @ d0<- vBB
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ s2<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
+ SET_VREG_DOUBLE_BY_ADDR d2, r9 @ vAA<- d2
GOTO_OPCODE ip @ jump to next instruction
%def fbinopWide2addr(instr=""):
@@ -82,13 +82,13 @@
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
+ GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
+ GET_VREG_DOUBLE_BY_ADDR d0, r9 @ d0<- vA
$instr @ d2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
+ SET_VREG_DOUBLE_BY_ADDR d2, r9 @ vAA<- d2
GOTO_OPCODE ip @ jump to next instruction
%def funop(instr=""):
@@ -101,7 +101,7 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
+ GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s1<- op
@@ -119,7 +119,7 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- fldd d0, [r3] @ d0<- vB
+ GET_VREG_DOUBLE_BY_ADDR d0, r3 @ d0<- vB
ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s0<- op
@@ -137,14 +137,14 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
+ GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ d0<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fstd d0, [r9] @ vA<- d0
+ SET_VREG_DOUBLE_BY_ADDR d0, r9 @ vA<- d0
GOTO_OPCODE ip @ jump to next instruction
%def op_add_double():
@@ -183,8 +183,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
+ GET_VREG_DOUBLE_BY_ADDR d0, r2 @ d0<- vBB
+ GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vCC
vcmpe.f64 d0, d1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r0, #1 @ r0<- 1 (default)
@@ -219,8 +219,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
+ GET_VREG_FLOAT_BY_ADDR s0, r2 @ s0<- vBB
+ GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vCC
vcmpe.f32 s0, s1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r0, #1 @ r0<- 1 (default)
@@ -255,8 +255,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
+ GET_VREG_DOUBLE_BY_ADDR d0, r2 @ d0<- vBB
+ GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vCC
vcmpe.f64 d0, d1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mvn r0, #0 @ r0<- -1 (default)
@@ -291,8 +291,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
+ GET_VREG_FLOAT_BY_ADDR s0, r2 @ s0<- vBB
+ GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vCC
vcmpe.f32 s0, s1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mvn r0, #0 @ r0<- -1 (default)
diff --git a/runtime/interpreter/mterp/arm/main.S b/runtime/interpreter/mterp/arm/main.S
index 6d6b1901ef..4cf65d1930 100644
--- a/runtime/interpreter/mterp/arm/main.S
+++ b/runtime/interpreter/mterp/arm/main.S
@@ -298,6 +298,25 @@ unspecified registers or condition codes.
add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
.endm
+.macro GET_VREG_WIDE_BY_ADDR reg0, reg1, addr
+ ldmia \addr, {\reg0, \reg1}
+.endm
+.macro SET_VREG_WIDE_BY_ADDR reg0, reg1, addr
+ stmia \addr, {\reg0, \reg1}
+.endm
+.macro GET_VREG_FLOAT_BY_ADDR reg, addr
+ flds \reg, [\addr]
+.endm
+.macro SET_VREG_FLOAT_BY_ADDR reg, addr
+ fsts \reg, [\addr]
+.endm
+.macro GET_VREG_DOUBLE_BY_ADDR reg, addr
+ fldd \reg, [\addr]
+.endm
+.macro SET_VREG_DOUBLE_BY_ADDR reg, addr
+ fstd \reg, [\addr]
+.endm
+
/*
* Refresh handler table.
*/
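The *_BY_ADDR macros introduced above give the arm handlers a single access path for wide and floating-point registers instead of open-coded ldmia/stmia and flds/fstd/fldd instructions. Conceptually, a wide value occupies two consecutive 32-bit slots addressed through &fp[v]; the following C-style model is illustrative only and is not ART code:

    #include <cstdint>

    // Illustrative model of what GET/SET_VREG_WIDE_BY_ADDR abstract.
    static inline int64_t GetVRegWideByAddr(const uint32_t* vreg_addr) {
      // Low word in fp[v], high word in fp[v + 1], matching "ldmia addr, {lo, hi}".
      return static_cast<int64_t>(vreg_addr[0]) |
             (static_cast<int64_t>(vreg_addr[1]) << 32);
    }

    static inline void SetVRegWideByAddr(uint32_t* vreg_addr, int64_t value) {
      vreg_addr[0] = static_cast<uint32_t>(value);        // "stmia addr, {lo, hi}"
      vreg_addr[1] = static_cast<uint32_t>(value >> 32);
    }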
diff --git a/runtime/interpreter/mterp/arm/object.S b/runtime/interpreter/mterp/arm/object.S
index 092aa9ef4e..7736383e95 100644
--- a/runtime/interpreter/mterp/arm/object.S
+++ b/runtime/interpreter/mterp/arm/object.S
@@ -160,7 +160,7 @@
VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ fp[A]<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_instance_of():
@@ -257,7 +257,7 @@
cmp r2, #0 @ check object for null
beq common_errNullObject @ object was null
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
- ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r0 @ r0/r1<- fp[A]/fp[A+1]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
strd r0, [r2, r3] @ obj.field<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/other.S b/runtime/interpreter/mterp/arm/other.S
index fcdde1e72a..31b9354530 100644
--- a/runtime/interpreter/mterp/arm/other.S
+++ b/runtime/interpreter/mterp/arm/other.S
@@ -104,7 +104,7 @@
FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_const_wide_16():
@@ -116,7 +116,7 @@
CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_const_wide_32():
@@ -130,7 +130,7 @@
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
mov r1, r0, asr #31 @ r1<- ssssssss
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_const_wide_high16():
@@ -143,7 +143,7 @@
CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_monitor_enter():
@@ -279,7 +279,7 @@
ldmia r3, {r0-r1} @ r0/r1<- retval.j
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r2 @ fp[AA]<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
@@ -290,11 +290,11 @@
ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[B]
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r2 @ fp[A]<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_move_wide_16():
@@ -304,10 +304,10 @@
FETCH r2, 1 @ r2<- AAAA
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
VREG_INDEX_TO_ADDR lr, r2 @ r2<- &fp[AAAA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[BBBB]
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
- stmia lr, {r0-r1} @ fp[AAAA]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, lr @ fp[AAAA]<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
@@ -318,11 +318,11 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[BBBB]
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r2 @ fp[AA]<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_nop():
diff --git a/runtime/interpreter/mterp/arm64/floating_point.S b/runtime/interpreter/mterp/arm64/floating_point.S
index 04ca6949ff..ad42db3f4b 100644
--- a/runtime/interpreter/mterp/arm64/floating_point.S
+++ b/runtime/interpreter/mterp/arm64/floating_point.S
@@ -1,5 +1,5 @@
%def fbinop(instr=""):
- /*:
+ /*
* Generic 32-bit floating-point operation.
*
* For: add-float, sub-float, mul-float, div-float
@@ -15,7 +15,24 @@
lsr w1, wINST, #8 // r1<- AA
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
+ SET_VREG_FLOAT s0, w1
+ GOTO_OPCODE ip // jump to next instruction
+
+%def fbinopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2"):
+ /*
+ * Generic 64-bit floating-point operation.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_DOUBLE $r2, w2 // w2<- vCC
+ GET_VREG_DOUBLE $r1, w1 // w1<- vBB
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $instr // $result<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_DOUBLE $result, w4 // vAA<- $result
GOTO_OPCODE ip // jump to next instruction
%def fbinop2addr(instr=""):
@@ -34,7 +51,22 @@
$instr // s2<- op
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
+ SET_VREG_FLOAT s2, w9
+ GOTO_OPCODE ip // jump to next instruction
+
+%def fbinopWide2addr(instr="fadd d0, d0, d1", r0="d0", r1="d1"):
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation.
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_DOUBLE $r1, w1 // x1<- vB
+ GET_VREG_DOUBLE $r0, w2 // x0<- vA
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ $instr // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_DOUBLE $r0, w2 // vAA<- result
GOTO_OPCODE ip // jump to next instruction
%def fcmp(wide="", r1="s1", r2="s2", cond="lt"):
@@ -47,8 +79,13 @@
lsr w4, wINST, #8 // w4<- AA
and w2, w0, #255 // w2<- BB
lsr w3, w0, #8 // w3<- CC
- GET_VREG$wide $r1, w2
- GET_VREG$wide $r2, w3
+% if r1.startswith("d"):
+ GET_VREG_DOUBLE $r1, w2
+ GET_VREG_DOUBLE $r2, w3
+% else:
+ GET_VREG $r1, w2
+ GET_VREG $r2, w3
+% #endif
fcmp $r1, $r2
cset w0, ne
cneg w0, w0, $cond
@@ -72,7 +109,7 @@
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
$instr // d0<- op
GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG $tgtreg, w4 // vA<- d0
+ SET_VREG_FLOAT $tgtreg, w4 // vA<- d0
GOTO_OPCODE ip // jump to next instruction
%def funopNarrower(srcreg="s0", tgtreg="d0", instr=""):
@@ -85,11 +122,15 @@
/* unop vA, vB */
lsr w3, wINST, #12 // w3<- B
ubfx w4, wINST, #8, #4 // w4<- A
+% if srcreg.startswith("d"):
+ GET_VREG_DOUBLE $srcreg, w3
+% else:
GET_VREG_WIDE $srcreg, w3
+% #endif
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
$instr // d0<- op
GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG $tgtreg, w4 // vA<- d0
+ SET_VREG_FLOAT $tgtreg, w4 // vA<- d0
GOTO_OPCODE ip // jump to next instruction
%def funopWide(srcreg="s0", tgtreg="d0", instr=""):
@@ -102,11 +143,19 @@
/* unop vA, vB */
lsr w3, wINST, #12 // w3<- B
ubfx w4, wINST, #8, #4 // w4<- A
+% if srcreg.startswith("d"):
+ GET_VREG_DOUBLE $srcreg, w3
+% else:
GET_VREG_WIDE $srcreg, w3
+% #endif
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
$instr // d0<- op
GET_INST_OPCODE ip // extract opcode from wINST
+% if tgtreg.startswith("d"):
+ SET_VREG_DOUBLE $tgtreg, w4 // vA<- d0
+% else:
SET_VREG_WIDE $tgtreg, w4 // vA<- d0
+% #endif
GOTO_OPCODE ip // jump to next instruction
%def funopWider(srcreg="s0", tgtreg="d0", instr=""):
@@ -127,10 +176,10 @@
GOTO_OPCODE ip // jump to next instruction
%def op_add_double():
-% binopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2")
+% fbinopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2")
%def op_add_double_2addr():
-% binopWide2addr(instr="fadd d0, d0, d1", r0="d0", r1="d1")
+% fbinopWide2addr(instr="fadd d0, d0, d1", r0="d0", r1="d1")
%def op_add_float():
% fbinop(instr="fadd s0, s0, s1")
@@ -151,10 +200,10 @@
% fcmp(wide="", r1="s1", r2="s2", cond="lt")
%def op_div_double():
-% binopWide(instr="fdiv d0, d1, d2", result="d0", r1="d1", r2="d2")
+% fbinopWide(instr="fdiv d0, d1, d2", result="d0", r1="d1", r2="d2")
%def op_div_double_2addr():
-% binopWide2addr(instr="fdiv d0, d0, d1", r0="d0", r1="d1")
+% fbinopWide2addr(instr="fdiv d0, d0, d1", r0="d0", r1="d1")
%def op_div_float():
% fbinop(instr="fdiv s0, s0, s1")
@@ -193,10 +242,10 @@
% funopNarrower(instr="scvtf s0, x0", srcreg="x0", tgtreg="s0")
%def op_mul_double():
-% binopWide(instr="fmul d0, d1, d2", result="d0", r1="d1", r2="d2")
+% fbinopWide(instr="fmul d0, d1, d2", result="d0", r1="d1", r2="d2")
%def op_mul_double_2addr():
-% binopWide2addr(instr="fmul d0, d0, d1", r0="d0", r1="d1")
+% fbinopWide2addr(instr="fmul d0, d0, d1", r0="d0", r1="d1")
%def op_mul_float():
% fbinop(instr="fmul s0, s0, s1")
@@ -215,8 +264,8 @@
FETCH w0, 1 // w0<- CCBB
lsr w2, w0, #8 // w2<- CC
and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d1, w2 // d1<- vCC
- GET_VREG_WIDE d0, w1 // d0<- vBB
+ GET_VREG_DOUBLE d1, w2 // d1<- vCC
+ GET_VREG_DOUBLE d0, w1 // d0<- vBB
bl fmod
lsr w4, wINST, #8 // w4<- AA
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
@@ -229,8 +278,8 @@
/* rem vA, vB */
lsr w1, wINST, #12 // w1<- B
ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // d1<- vB
- GET_VREG_WIDE d0, w2 // d0<- vA
+ GET_VREG_DOUBLE d1, w1 // d1<- vB
+ GET_VREG_DOUBLE d0, w2 // d0<- vA
bl fmod
ubfx w2, wINST, #8, #4 // w2<- A (need to reload - killed across call)
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
@@ -253,14 +302,14 @@
ubfx w9, wINST, #8, #4 // w9<- A
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w9
+ SET_VREG_FLOAT s0, w9
GOTO_OPCODE ip // jump to next instruction
%def op_sub_double():
-% binopWide(instr="fsub d0, d1, d2", result="d0", r1="d1", r2="d2")
+% fbinopWide(instr="fsub d0, d1, d2", result="d0", r1="d1", r2="d2")
%def op_sub_double_2addr():
-% binopWide2addr(instr="fsub d0, d0, d1", r0="d0", r1="d1")
+% fbinopWide2addr(instr="fsub d0, d0, d1", r0="d0", r1="d1")
%def op_sub_float():
% fbinop(instr="fsub s0, s0, s1")
diff --git a/runtime/interpreter/mterp/arm64/main.S b/runtime/interpreter/mterp/arm64/main.S
index 858cb38697..0cfbbff3aa 100644
--- a/runtime/interpreter/mterp/arm64/main.S
+++ b/runtime/interpreter/mterp/arm64/main.S
@@ -259,10 +259,13 @@ codes.
str \reg, [xFP, \vreg, uxtw #2]
str \reg, [xREFS, \vreg, uxtw #2]
.endm
+.macro SET_VREG_FLOAT reg, vreg
+ str \reg, [xFP, \vreg, uxtw #2]
+ str wzr, [xREFS, \vreg, uxtw #2]
+.endm
/*
* Get/set the 64-bit value from a Dalvik register.
- * TUNING: can we do better here?
*/
.macro GET_VREG_WIDE reg, vreg
add ip2, xFP, \vreg, lsl #2
@@ -274,6 +277,16 @@ codes.
add ip2, xREFS, \vreg, lsl #2
str xzr, [ip2]
.endm
+.macro GET_VREG_DOUBLE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ ldr \reg, [ip2]
+.endm
+.macro SET_VREG_DOUBLE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ str \reg, [ip2]
+ add ip2, xREFS, \vreg, lsl #2
+ str xzr, [ip2]
+.endm
/*
* Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
diff --git a/runtime/interpreter/mterp/x86/arithmetic.S b/runtime/interpreter/mterp/x86/arithmetic.S
index 3b5f0beb89..973e5b8a0f 100644
--- a/runtime/interpreter/mterp/x86/arithmetic.S
+++ b/runtime/interpreter/mterp/x86/arithmetic.S
@@ -153,7 +153,7 @@
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
GET_VREG %eax, %eax # eax <- vBB
- $instr # ex: addl VREG_ADDRESS(%ecx),%eax
+ $instr VREG_ADDRESS(%ecx), %eax
SET_VREG $result, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -187,7 +187,7 @@
sarl $$4, rINST # rINST <- B
GET_VREG %eax, rINST # eax <- vB
andb $$0xf, %cl # ecx <- A
- $instr # for ex: addl %eax,VREG_ADDRESS(%ecx)
+ $instr %eax, VREG_ADDRESS(%ecx)
CLEAR_REF %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -240,8 +240,8 @@
movl rIBASE, LOCAL0(%esp) # save rIBASE
GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- $instr1 # ex: addl VREG_ADDRESS(%ecx),rIBASE
- $instr2 # ex: adcl VREG_HIGH_ADDRESS(%ecx),%eax
+ $instr1 VREG_ADDRESS(%ecx), rIBASE
+ $instr2 VREG_HIGH_ADDRESS(%ecx), %eax
SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
movl LOCAL0(%esp), rIBASE # restore rIBASE
SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
@@ -257,8 +257,8 @@
GET_VREG %eax, %ecx # eax<- v[B+0]
GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
andb $$0xF, rINSTbl # rINST<- A
- $instr1 # ex: addl %eax,(rFP,rINST,4)
- $instr2 # ex: adcl %ecx,4(rFP,rINST,4)
+ $instr1 %eax, VREG_ADDRESS(rINST)
+ $instr2 %ecx, VREG_HIGH_ADDRESS(rINST)
CLEAR_WIDE_REF rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -355,10 +355,10 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_add_int():
-% binop(instr="addl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="addl")
%def op_add_int_2addr():
-% binop2addr(instr="addl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="addl")
%def op_add_int_lit16():
% binopLit16(instr="addl %ecx, %eax")
@@ -367,16 +367,16 @@
% binopLit8(instr="addl %ecx, %eax")
%def op_add_long():
-% binopWide(instr1="addl VREG_ADDRESS(%ecx), rIBASE", instr2="adcl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="addl", instr2="adcl")
%def op_add_long_2addr():
-% binopWide2addr(instr1="addl %eax, (rFP,rINST,4)", instr2="adcl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="addl", instr2="adcl")
%def op_and_int():
-% binop(instr="andl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="andl")
%def op_and_int_2addr():
-% binop2addr(instr="andl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="andl")
%def op_and_int_lit16():
% binopLit16(instr="andl %ecx, %eax")
@@ -385,10 +385,10 @@
% binopLit8(instr="andl %ecx, %eax")
%def op_and_long():
-% binopWide(instr1="andl VREG_ADDRESS(%ecx), rIBASE", instr2="andl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="andl", instr2="andl")
%def op_and_long_2addr():
-% binopWide2addr(instr1="andl %eax, (rFP,rINST,4)", instr2="andl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="andl", instr2="andl")
%def op_cmp_long():
/*
@@ -666,10 +666,10 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_or_int():
-% binop(instr="orl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="orl")
%def op_or_int_2addr():
-% binop2addr(instr="orl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="orl")
%def op_or_int_lit16():
% binopLit16(instr="orl %ecx, %eax")
@@ -678,10 +678,10 @@
% binopLit8(instr="orl %ecx, %eax")
%def op_or_long():
-% binopWide(instr1="orl VREG_ADDRESS(%ecx), rIBASE", instr2="orl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="orl", instr2="orl")
%def op_or_long_2addr():
-% binopWide2addr(instr1="orl %eax, (rFP,rINST,4)", instr2="orl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="orl", instr2="orl")
%def op_rem_int():
% bindiv(result="rIBASE", special="$0", rem="1")
@@ -845,16 +845,16 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_sub_int():
-% binop(instr="subl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="subl")
%def op_sub_int_2addr():
-% binop2addr(instr="subl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="subl")
%def op_sub_long():
-% binopWide(instr1="subl VREG_ADDRESS(%ecx), rIBASE", instr2="sbbl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="subl", instr2="sbbl")
%def op_sub_long_2addr():
-% binopWide2addr(instr1="subl %eax, (rFP,rINST,4)", instr2="sbbl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="subl", instr2="sbbl")
%def op_ushr_int():
% binop1(instr="shrl %cl, %eax")
@@ -925,10 +925,10 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_xor_int():
-% binop(instr="xorl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="xorl")
%def op_xor_int_2addr():
-% binop2addr(instr="xorl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="xorl")
%def op_xor_int_lit16():
% binopLit16(instr="xorl %ecx, %eax")
@@ -937,7 +937,7 @@
% binopLit8(instr="xorl %ecx, %eax")
%def op_xor_long():
-% binopWide(instr1="xorl VREG_ADDRESS(%ecx), rIBASE", instr2="xorl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="xorl", instr2="xorl")
%def op_xor_long_2addr():
-% binopWide2addr(instr1="xorl %eax, (rFP,rINST,4)", instr2="xorl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="xorl", instr2="xorl")
diff --git a/runtime/interpreter/mterp/x86/floating_point.S b/runtime/interpreter/mterp/x86/floating_point.S
index 3de1fc8f19..bc7c59dc69 100644
--- a/runtime/interpreter/mterp/x86/floating_point.S
+++ b/runtime/interpreter/mterp/x86/floating_point.S
@@ -18,7 +18,7 @@
/* op vAA, vBB, vCC */
movzbl 3(rPC), %ecx # ecx<- CC
movzbl 2(rPC), %eax # eax<- BB
- movs${suff} VREG_ADDRESS(%eax), %xmm0
+ GET_VREG_XMM${suff} %xmm0, %eax
xor %eax, %eax
ucomis${suff} VREG_ADDRESS(%ecx), %xmm0
jp .L${opcode}_nan_is_${nanval}
@@ -55,9 +55,9 @@
%def sseBinop(instr="", suff=""):
movzbl 2(rPC), %ecx # ecx <- BB
movzbl 3(rPC), %eax # eax <- CC
- movs${suff} VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
+ GET_VREG_XMM${suff} %xmm0, %ecx # %xmm0 <- 1st src
${instr}${suff} VREG_ADDRESS(%eax), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
+ SET_VREG_XMM${suff} %xmm0, rINST # vAA <- %xmm0
pxor %xmm0, %xmm0
movs${suff} %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -65,10 +65,10 @@
%def sseBinop2Addr(instr="", suff=""):
movzx rINSTbl, %ecx # ecx <- A+
andl $$0xf, %ecx # ecx <- A
- movs${suff} VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
+ GET_VREG_XMM${suff} %xmm0, %ecx # %xmm0 <- 1st src
sarl $$4, rINST # rINST<- B
${instr}${suff} VREG_ADDRESS(rINST), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
+ SET_VREG_XMM${suff} %xmm0, %ecx # vAA<- %xmm0
pxor %xmm0, %xmm0
movs${suff} %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/main.S b/runtime/interpreter/mterp/x86/main.S
index 0621fb468a..6eaea6f260 100644
--- a/runtime/interpreter/mterp/x86/main.S
+++ b/runtime/interpreter/mterp/x86/main.S
@@ -318,6 +318,19 @@ unspecified registers or condition codes.
movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm
+.macro GET_VREG_XMMs _xmmreg _vreg
+ movss VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro GET_VREG_XMMd _xmmreg _vreg
+ movsd VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro SET_VREG_XMMs _xmmreg _vreg
+ movss \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+.macro SET_VREG_XMMd _xmmreg _vreg
+ movsd \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+
/*
* function support macros.
*/
diff --git a/runtime/interpreter/mterp/x86_64/arithmetic.S b/runtime/interpreter/mterp/x86_64/arithmetic.S
index 263f82b9f6..ff64b530b5 100644
--- a/runtime/interpreter/mterp/x86_64/arithmetic.S
+++ b/runtime/interpreter/mterp/x86_64/arithmetic.S
@@ -137,7 +137,7 @@
movzbq 2(rPC), %rax # rax <- BB
movzbq 3(rPC), %rcx # rcx <- CC
GET_VREG %eax, %rax # eax <- vBB
- $instr # ex: addl VREG_ADDRESS(%rcx),%eax
+ $instr VREG_ADDRESS(%rcx),%eax
SET_VREG $result, rINSTq
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -177,7 +177,7 @@
sarl $$4, rINST # rINST <- B
andb $$0xf, %cl # ecx <- A
GET_VREG %eax, rINSTq # eax <- vB
- $instr # for ex: addl %eax,(rFP,%ecx,4)
+ $instr %eax, VREG_ADDRESS(%rcx)
CLEAR_REF %rcx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -228,7 +228,7 @@
movzbq 2(rPC), %rax # eax <- BB
movzbq 3(rPC), %rcx # ecx <- CC
GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- $instr # ex: addq VREG_ADDRESS(%rcx),%rax
+ $instr VREG_ADDRESS(%rcx),%rax
SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -241,7 +241,7 @@
sarl $$4, rINST # rINST <- B
andb $$0xf, %cl # ecx <- A
GET_WIDE_VREG %rax, rINSTq # rax <- vB
- $instr # for ex: addq %rax,VREG_ADDRESS(%rcx)
+ $instr %rax,VREG_ADDRESS(%rcx)
CLEAR_WIDE_REF %rcx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -255,7 +255,7 @@
movl rINST, %ecx # rcx <- A+
sarl $$4, rINST # rINST <- B
andb $$0xf, %cl # ecx <- A
- movs${fp_suffix} VREG_ADDRESS(rINSTq), %xmm0
+ GET_VREG_XMM${fp_suffix} %xmm0, rINSTq
mov${i_suffix} ${max_const}, ${result_reg}
cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
comis${fp_suffix} %xmm1, %xmm0
@@ -317,10 +317,10 @@ $instr
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_add_int():
-% binop(instr="addl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="addl")
%def op_add_int_2addr():
-% binop2addr(instr="addl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="addl")
%def op_add_int_lit16():
% binopLit16(instr="addl %ecx, %eax")
@@ -329,16 +329,16 @@ $instr
% binopLit8(instr="addl %ecx, %eax")
%def op_add_long():
-% binopWide(instr="addq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="addq")
%def op_add_long_2addr():
-% binopWide2addr(instr="addq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="addq")
%def op_and_int():
-% binop(instr="andl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="andl")
%def op_and_int_2addr():
-% binop2addr(instr="andl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="andl")
%def op_and_int_lit16():
% binopLit16(instr="andl %ecx, %eax")
@@ -347,10 +347,10 @@ $instr
% binopLit8(instr="andl %ecx, %eax")
%def op_and_long():
-% binopWide(instr="andq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="andq")
%def op_and_long_2addr():
-% binopWide2addr(instr="andq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="andq")
%def op_cmp_long():
/*
@@ -413,7 +413,7 @@ $instr
% op_move()
%def op_mul_int():
-% binop(instr="imull VREG_ADDRESS(%rcx), %eax")
+% binop(instr="imull")
%def op_mul_int_2addr():
/* mul vA, vB */
@@ -432,7 +432,7 @@ $instr
% binopLit8(instr="imull %ecx, %eax")
%def op_mul_long():
-% binopWide(instr="imulq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="imulq")
%def op_mul_long_2addr():
/* mul vA, vB */
@@ -457,10 +457,10 @@ $instr
% unop(instr=" notq %rax", wide="1")
%def op_or_int():
-% binop(instr="orl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="orl")
%def op_or_int_2addr():
-% binop2addr(instr="orl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="orl")
%def op_or_int_lit16():
% binopLit16(instr="orl %ecx, %eax")
@@ -469,10 +469,10 @@ $instr
% binopLit8(instr="orl %ecx, %eax")
%def op_or_long():
-% binopWide(instr="orq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="orq")
%def op_or_long_2addr():
-% binopWide2addr(instr="orq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="orq")
%def op_rem_int():
% bindiv(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")
@@ -530,16 +530,16 @@ $instr
% shop2addr(instr="sarq %cl, %rax", wide="1")
%def op_sub_int():
-% binop(instr="subl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="subl")
%def op_sub_int_2addr():
-% binop2addr(instr="subl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="subl")
%def op_sub_long():
-% binopWide(instr="subq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="subq")
%def op_sub_long_2addr():
-% binopWide2addr(instr="subq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="subq")
%def op_ushr_int():
% binop1(instr="shrl %cl, %eax")
@@ -557,10 +557,10 @@ $instr
% shop2addr(instr="shrq %cl, %rax", wide="1")
%def op_xor_int():
-% binop(instr="xorl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="xorl")
%def op_xor_int_2addr():
-% binop2addr(instr="xorl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="xorl")
%def op_xor_int_lit16():
% binopLit16(instr="xorl %ecx, %eax")
@@ -569,7 +569,7 @@ $instr
% binopLit8(instr="xorl %ecx, %eax")
%def op_xor_long():
-% binopWide(instr="xorq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="xorq")
%def op_xor_long_2addr():
-% binopWide2addr(instr="xorq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="xorq")
diff --git a/runtime/interpreter/mterp/x86_64/floating_point.S b/runtime/interpreter/mterp/x86_64/floating_point.S
index b40c0e632e..7fcb7424b6 100644
--- a/runtime/interpreter/mterp/x86_64/floating_point.S
+++ b/runtime/interpreter/mterp/x86_64/floating_point.S
@@ -18,7 +18,7 @@
/* op vAA, vBB, vCC */
movzbq 3(rPC), %rcx # ecx<- CC
movzbq 2(rPC), %rax # eax<- BB
- movs${suff} VREG_ADDRESS(%rax), %xmm0
+ GET_VREG_XMM${suff} %xmm0, %rax
xor %eax, %eax
ucomis${suff} VREG_ADDRESS(%rcx), %xmm0
jp .L${opcode}_nan_is_${nanval}
@@ -44,10 +44,10 @@
andb $$0xf, %cl # ecx <- A
cvts${source_suffix}2s${dest_suffix} VREG_ADDRESS(rINSTq), %xmm0
.if $wide
- movsd %xmm0, VREG_ADDRESS(%rcx)
+ SET_VREG_XMMd %xmm0, %rcx
CLEAR_WIDE_REF %rcx
.else
- movss %xmm0, VREG_ADDRESS(%rcx)
+ SET_VREG_XMMs %xmm0, %rcx
CLEAR_REF %rcx
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -55,9 +55,9 @@
%def sseBinop(instr="", suff=""):
movzbq 2(rPC), %rcx # ecx <- BB
movzbq 3(rPC), %rax # eax <- CC
- movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ GET_VREG_XMM${suff} %xmm0, %rcx # %xmm0 <- 1st src
${instr}${suff} VREG_ADDRESS(%rax), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ SET_VREG_XMM${suff} %xmm0, rINSTq # vAA <- %xmm0
pxor %xmm0, %xmm0
movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -65,10 +65,10 @@
%def sseBinop2Addr(instr="", suff=""):
movl rINST, %ecx # ecx <- A+
andl $$0xf, %ecx # ecx <- A
- movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ GET_VREG_XMM${suff} %xmm0, %rcx # %xmm0 <- 1st src
sarl $$4, rINST # rINST<- B
${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ SET_VREG_XMM${suff} %xmm0, %rcx # vAA <- %xmm0
pxor %xmm0, %xmm0
movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/main.S b/runtime/interpreter/mterp/x86_64/main.S
index 4609067a5a..5900220750 100644
--- a/runtime/interpreter/mterp/x86_64/main.S
+++ b/runtime/interpreter/mterp/x86_64/main.S
@@ -306,6 +306,19 @@ unspecified registers or condition codes.
movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm
+.macro GET_VREG_XMMs _xmmreg _vreg
+ movss VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro GET_VREG_XMMd _xmmreg _vreg
+ movsd VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro SET_VREG_XMMs _xmmreg _vreg
+ movss \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+.macro SET_VREG_XMMd _xmmreg _vreg
+ movsd \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+
/*
* function support macros.
*/
diff --git a/runtime/jdwp_provider.h b/runtime/jdwp_provider.h
index c4f19899c9..9579513939 100644
--- a/runtime/jdwp_provider.h
+++ b/runtime/jdwp_provider.h
@@ -20,8 +20,6 @@
#include <ios>
#include "base/globals.h"
-#include "base/macros.h"
-#include "base/logging.h"
namespace art {
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index d67d9dced8..4a3ef07819 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -56,10 +56,12 @@ static constexpr size_t kJitSlowStressDefaultCompileThreshold = 2; // Slow-
// JIT compiler
void* Jit::jit_library_handle_ = nullptr;
void* Jit::jit_compiler_handle_ = nullptr;
-void* (*Jit::jit_load_)(bool*) = nullptr;
+void* (*Jit::jit_load_)(void) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
+bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
+void (*Jit::jit_update_options_)(void*) = nullptr;
struct StressModeHelper {
DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
@@ -179,20 +181,21 @@ Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
LOG(WARNING) << "Not creating JIT: library not loaded";
return nullptr;
}
- bool will_generate_debug_symbols = false;
- jit_compiler_handle_ = (jit_load_)(&will_generate_debug_symbols);
+ jit_compiler_handle_ = (jit_load_)();
if (jit_compiler_handle_ == nullptr) {
LOG(WARNING) << "Not creating JIT: failed to allocate a compiler";
return nullptr;
}
std::unique_ptr<Jit> jit(new Jit(code_cache, options));
- jit->generate_debug_info_ = will_generate_debug_symbols;
+ // If the code collector is enabled, check if that still holds:
// With 'perf', we want a 1-1 mapping between an address and a method.
// We aren't able to keep method pointers live during the instrumentation method entry trampoline
// so we will just disable jit-gc if we are doing that.
- code_cache->SetGarbageCollectCode(!jit->generate_debug_info_ &&
- !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+ if (code_cache->GetGarbageCollectCode()) {
+ code_cache->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
+ !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+ }
VLOG(jit) << "JIT created with initial_capacity="
<< PrettySize(options->GetCodeCacheInitialCapacity())
@@ -200,13 +203,21 @@ Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
<< ", compile_threshold=" << options->GetCompileThreshold()
<< ", profile_saver_options=" << options->GetProfileSaverOptions();
- jit->CreateThreadPool();
-
// Notify native debugger about the classes already loaded before the creation of the jit.
jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
return jit.release();
}
+template <typename T>
+bool Jit::LoadSymbol(T* address, const char* name, std::string* error_msg) {
+ *address = reinterpret_cast<T>(dlsym(jit_library_handle_, name));
+ if (*address == nullptr) {
+ *error_msg = std::string("JIT couldn't find ") + name + std::string(" entry point");
+ return false;
+ }
+ return true;
+}
+
bool Jit::LoadCompilerLibrary(std::string* error_msg) {
jit_library_handle_ = dlopen(
kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
@@ -216,31 +227,16 @@ bool Jit::LoadCompilerLibrary(std::string* error_msg) {
*error_msg = oss.str();
return false;
}
- jit_load_ = reinterpret_cast<void* (*)(bool*)>(dlsym(jit_library_handle_, "jit_load"));
- if (jit_load_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_load entry point";
- return false;
- }
- jit_unload_ = reinterpret_cast<void (*)(void*)>(
- dlsym(jit_library_handle_, "jit_unload"));
- if (jit_unload_ == nullptr) {
+ bool all_resolved = true;
+ all_resolved = all_resolved && LoadSymbol(&jit_load_, "jit_load", error_msg);
+ all_resolved = all_resolved && LoadSymbol(&jit_unload_, "jit_unload", error_msg);
+ all_resolved = all_resolved && LoadSymbol(&jit_compile_method_, "jit_compile_method", error_msg);
+ all_resolved = all_resolved && LoadSymbol(&jit_types_loaded_, "jit_types_loaded", error_msg);
+ all_resolved = all_resolved && LoadSymbol(&jit_update_options_, "jit_update_options", error_msg);
+ all_resolved = all_resolved &&
+ LoadSymbol(&jit_generate_debug_info_, "jit_generate_debug_info", error_msg);
+ if (!all_resolved) {
dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_unload entry point";
- return false;
- }
- jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*, bool)>(
- dlsym(jit_library_handle_, "jit_compile_method"));
- if (jit_compile_method_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_compile_method entry point";
- return false;
- }
- jit_types_loaded_ = reinterpret_cast<void (*)(void*, mirror::Class**, size_t)>(
- dlsym(jit_library_handle_, "jit_types_loaded"));
- if (jit_types_loaded_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_types_loaded entry point";
return false;
}
return true;
@@ -296,7 +292,11 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
}
void Jit::CreateThreadPool() {
- // There is a DCHECK in the 'AddSamples' method to ensure the tread pool
+ if (Runtime::Current()->IsSafeMode()) {
+ // Never create the pool in safe mode.
+ return;
+ }
+ // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
// is not null when we instrument.
// We need peers as we may report the JIT thread, e.g., in the debugger.
@@ -375,7 +375,7 @@ void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
return;
}
jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit->generate_debug_info_) {
+ if (jit_generate_debug_info_(jit->jit_compiler_handle_)) {
DCHECK(jit->jit_types_loaded_ != nullptr);
jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
}
@@ -390,7 +390,7 @@ void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
std::vector<mirror::Class*> classes_;
};
- if (generate_debug_info_) {
+ if (jit_generate_debug_info_(jit_compiler_handle_)) {
ScopedObjectAccess so(Thread::Current());
CollectClasses visitor;
@@ -630,8 +630,11 @@ static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mut
void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
if (thread_pool_ == nullptr) {
- // Should only see this when shutting down.
- DCHECK(Runtime::Current()->IsShuttingDown(self));
+ // Should only see this when shutting down, starting up, or in zygote, which doesn't
+ // have a thread pool.
+ DCHECK(Runtime::Current()->IsShuttingDown(self) ||
+ !Runtime::Current()->IsFinishedStarting() ||
+ Runtime::Current()->IsZygote());
return;
}
if (IgnoreSamplesForMethod(method)) {
@@ -795,5 +798,15 @@ ScopedJitSuspend::~ScopedJitSuspend() {
}
}
+void Jit::PostForkChildAction() {
+ // At this point, the compiler options have been adjusted to the particular configuration
+ // of the forked child. Parse them again.
+ jit_update_options_(jit_compiler_handle_);
+
+ // Adjust the status of code cache collection: the status from zygote was to not collect.
+ code_cache_->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
+ !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+}
+
} // namespace jit
} // namespace art
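The entry points resolved above (jit_load, jit_unload, jit_compile_method, jit_types_loaded, jit_update_options, jit_generate_debug_info) are exported by the compiler library (libart-compiler.so / libartd-compiler.so), whose side of this change is not shown here. Judging only from the runtime-side function-pointer types, the exports have roughly the following shape; this is a hedged sketch with a hypothetical handle type, not the actual compiler implementation:

    // Sketch only: exports matching the function-pointer types declared in jit.h.
    struct JitCompilerHandle { bool generate_debug_info = false; };  // hypothetical stand-in

    extern "C" void* jit_load() {                        // previously void* jit_load(bool*)
      return new JitCompilerHandle();
    }
    extern "C" bool jit_generate_debug_info(void* handle) {
      // Queried on demand rather than cached in the removed Jit::generate_debug_info_ field.
      return static_cast<JitCompilerHandle*>(handle)->generate_debug_info;
    }
    extern "C" void jit_update_options(void* handle) {
      // Re-read the compiler options behind `handle`; called from Jit::PostForkChildAction
      // once the forked child has adjusted its configuration.
      static_cast<JitCompilerHandle*>(handle)->generate_debug_info = false;  // placeholder effect
    }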
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 46b0762629..e12b032feb 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -100,10 +100,6 @@ class JitOptions {
return use_jit_compilation_;
}
- bool RWXMemoryAllowed() const {
- return rwx_memory_allowed_;
- }
-
void SetUseJitCompilation(bool b) {
use_jit_compilation_ = b;
}
@@ -125,10 +121,6 @@ class JitOptions {
compile_threshold_ = 0;
}
- void SetRWXMemoryAllowed(bool rwx_allowed) {
- rwx_memory_allowed_ = rwx_allowed;
- }
-
private:
bool use_jit_compilation_;
size_t code_cache_initial_capacity_;
@@ -140,7 +132,6 @@ class JitOptions {
uint16_t invoke_transition_weight_;
bool dump_info_on_shutdown_;
int thread_pool_pthread_priority_;
- bool rwx_memory_allowed_;
ProfileSaverOptions profile_saver_options_;
JitOptions()
@@ -153,8 +144,7 @@ class JitOptions {
priority_thread_weight_(0),
invoke_transition_weight_(0),
dump_info_on_shutdown_(false),
- thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority),
- rwx_memory_allowed_(true) {}
+ thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority) {}
DISALLOW_COPY_AND_ASSIGN(JitOptions);
};
@@ -295,6 +285,9 @@ class Jit {
// Start JIT threads.
void Start();
+ // Transition to a zygote child state.
+ void PostForkChildAction();
+
private:
Jit(JitCodeCache* code_cache, JitOptions* options);
@@ -303,13 +296,13 @@ class Jit {
// JIT compiler
static void* jit_library_handle_;
static void* jit_compiler_handle_;
- static void* (*jit_load_)(bool*);
+ static void* (*jit_load_)(void);
static void (*jit_unload_)(void*);
static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool);
static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
-
- // Whether we should generate debug info when compiling.
- bool generate_debug_info_;
+ static void (*jit_update_options_)(void*);
+ static bool (*jit_generate_debug_info_)(void*);
+ template <typename T> static bool LoadSymbol(T*, const char* symbol, std::string* error_msg);
// JIT resources owned by runtime.
jit::JitCodeCache* const code_cache_;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 0bdb0c9e16..97887ccbc9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -18,7 +18,8 @@
#include <sstream>
-#include "android-base/unique_fd.h"
+#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
#include "arch/context.h"
#include "art_method-inl.h"
@@ -64,6 +65,11 @@ namespace jit {
static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
+// Data cache will be half of the capacity.
+// Code cache will be the other half of the capacity.
+// TODO: Make this variable?
+static constexpr size_t kCodeAndDataCapacityDivider = 2;
+
static constexpr int kProtR = PROT_READ;
static constexpr int kProtRW = PROT_READ | PROT_WRITE;
static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
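kCodeAndDataCapacityDivider makes the even split between the data and executable regions explicit. A worked example of the arithmetic InitializeMappings performs below, using the runtime's MB size constant and a 64 MiB figure chosen purely for illustration:

    // Example: a max_capacity_ of 64 MiB splits into equal data and exec halves.
    constexpr size_t capacity      = 64 * MB;                                 // max_capacity_
    constexpr size_t data_capacity = capacity / kCodeAndDataCapacityDivider;  // 32 MiB for data
    constexpr size_t exec_capacity = capacity - data_capacity;                // 32 MiB for code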
@@ -183,69 +189,45 @@ class JitCodeCache::JniStubData {
std::vector<ArtMethod*> methods_;
};
-JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
- size_t max_capacity,
- bool used_only_for_profile_data,
- bool rwx_memory_allowed,
- std::string* error_msg) {
+bool JitCodeCache::InitializeMappings(bool rwx_memory_allowed,
+ bool is_zygote,
+ std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- CHECK_GE(max_capacity, initial_capacity);
-
- // We need to have 32 bit offsets from method headers in code cache which point to things
- // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
- // Ensure we're below 1 GB to be safe.
- if (max_capacity > 1 * GB) {
- std::ostringstream oss;
- oss << "Maxium code cache capacity is limited to 1 GB, "
- << PrettySize(max_capacity) << " is too big";
- *error_msg = oss.str();
- return nullptr;
- }
- // Register for membarrier expedited sync core if JIT will be generating code.
- if (!used_only_for_profile_data) {
- if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
- // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
- // flushed and it's used when adding code to the JIT. The memory used by the new code may
- // have just been released and, in theory, the old code could still be in a pipeline.
- VLOG(jit) << "Kernel does not support membarrier sync-core";
- }
- }
+ const size_t capacity = max_capacity_;
+ const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
+ const size_t exec_capacity = capacity - data_capacity;
// File descriptor enabling dual-view mapping of code section.
unique_fd mem_fd;
- // Bionic supports memfd_create, but the call may fail on older kernels.
- mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
- if (mem_fd.get() < 0) {
- std::ostringstream oss;
- oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
- if (!rwx_memory_allowed) {
- // Without using RWX page permissions, the JIT can not fallback to single mapping as it
- // requires tranitioning the code pages to RWX for updates.
- *error_msg = oss.str();
- return nullptr;
+ // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
+ // for it.
+ if (!is_zygote) {
+ // Bionic supports memfd_create, but the call may fail on older kernels.
+ mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
+ if (mem_fd.get() < 0) {
+ std::ostringstream oss;
+ oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
+ if (!rwx_memory_allowed) {
+ // Without using RWX page permissions, the JIT cannot fall back to a single mapping, as it
+ // requires transitioning the code pages to RWX for updates.
+ *error_msg = oss.str();
+ return false;
+ }
+ VLOG(jit) << oss.str();
}
- VLOG(jit) << oss.str();
}
- if (mem_fd.get() >= 0 && ftruncate(mem_fd, max_capacity) != 0) {
+ if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
std::ostringstream oss;
oss << "Failed to initialize memory file: " << strerror(errno);
*error_msg = oss.str();
- return nullptr;
+ return false;
}
- // Data cache will be half of the initial allocation.
- // Code cache will be the other half of the initial allocation.
- // TODO: Make this variable?
-
- // Align both capacities to page size, as that's the unit mspaces use.
- initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
- max_capacity = RoundDown(max_capacity, 2 * kPageSize);
- const size_t data_capacity = max_capacity / 2;
- const size_t exec_capacity = used_only_for_profile_data ? 0 : max_capacity - data_capacity;
- DCHECK_LE(data_capacity + exec_capacity, max_capacity);
+ std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
+ std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
@@ -285,7 +267,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
mem_fd,
/* start= */ 0,
/* low_4gb= */ true,
- "data-code-cache",
+ data_cache_name.c_str(),
&error_str);
} else {
// Single view of JIT code cache case. Create an initial mapping of data pages large enough
@@ -304,7 +286,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// back to RX after the update.
base_flags = MAP_PRIVATE | MAP_ANON;
data_pages = MemMap::MapAnonymous(
- "data-code-cache",
+ data_cache_name.c_str(),
data_capacity + exec_capacity,
kProtRW,
/* low_4gb= */ true,
@@ -313,9 +295,9 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
if (!data_pages.IsValid()) {
std::ostringstream oss;
- oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
+ oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
*error_msg = oss.str();
- return nullptr;
+ return false;
}
MemMap exec_pages;
@@ -326,7 +308,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// (for processes that cannot map WX pages). Otherwise, this region does not need to be
// executable as there is no code in the cache yet.
exec_pages = data_pages.RemapAtEnd(divider,
- "jit-code-cache",
+ exec_cache_name.c_str(),
kProtRX,
base_flags | MAP_FIXED,
mem_fd.get(),
@@ -334,21 +316,22 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
&error_str);
if (!exec_pages.IsValid()) {
std::ostringstream oss;
- oss << "Failed to create read execute code cache: " << error_str << " size=" << max_capacity;
+ oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
*error_msg = oss.str();
- return nullptr;
+ return false;
}
if (mem_fd.get() >= 0) {
// For dual view, create the secondary view of code memory used for updating code. This view
// is never executable.
+ std::string name = exec_cache_name + "-rw";
non_exec_pages = MemMap::MapFile(exec_capacity,
kProtR,
base_flags,
mem_fd,
/* start= */ data_capacity,
/* low_4GB= */ false,
- "jit-code-cache-rw",
+ name.c_str(),
&error_str);
if (!non_exec_pages.IsValid()) {
static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
@@ -357,44 +340,77 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
VLOG(jit) << kFailedNxView;
} else {
*error_msg = kFailedNxView;
- return nullptr;
+ return false;
}
}
}
} else {
// Profiling only. No memory for code required.
- DCHECK(used_only_for_profile_data);
}
- const size_t initial_data_capacity = initial_capacity / 2;
- const size_t initial_exec_capacity =
- (exec_capacity == 0) ? 0 : (initial_capacity - initial_data_capacity);
+ data_pages_ = std::move(data_pages);
+ exec_pages_ = std::move(exec_pages);
+ non_exec_pages_ = std::move(non_exec_pages);
+ return true;
+}
+
+JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
+ bool rwx_memory_allowed,
+ bool is_zygote,
+ std::string* error_msg) {
+ // Register for membarrier expedited sync core if JIT will be generating code.
+ if (!used_only_for_profile_data) {
+ if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
+ // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
+ // flushed and it's used when adding code to the JIT. The memory used by the new code may
+ // have just been released and, in theory, the old code could still be in a pipeline.
+ VLOG(jit) << "Kernel does not support membarrier sync-core";
+ }
+ }
+
+ // Check whether the provided max capacity in options is below 1GB.
+ size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
+ // We need to have 32 bit offsets from method headers in code cache which point to things
+ // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
+ // Ensure we're below 1 GB to be safe.
+ if (max_capacity > 1 * GB) {
+ std::ostringstream oss;
+ oss << "Maximum code cache capacity is limited to 1 GB, "
+ << PrettySize(max_capacity) << " is too big";
+ *error_msg = oss.str();
+ return nullptr;
+ }
+
+ size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
+
+ std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
+
+ MutexLock mu(Thread::Current(), jit_code_cache->lock_);
+ jit_code_cache->InitializeState(initial_capacity, max_capacity);
+
+ // Zygote should never collect code to share the memory with the children.
+ if (is_zygote) {
+ jit_code_cache->SetGarbageCollectCode(false);
+ }
+
+ if (!jit_code_cache->InitializeMappings(rwx_memory_allowed, is_zygote, error_msg)) {
+ return nullptr;
+ }
- return new JitCodeCache(
- std::move(data_pages),
- std::move(exec_pages),
- std::move(non_exec_pages),
- initial_data_capacity,
- initial_exec_capacity,
- max_capacity);
+ jit_code_cache->InitializeSpaces();
+
+ VLOG(jit) << "Created jit code cache: initial capacity="
+ << PrettySize(initial_capacity)
+ << ", maximum capacity="
+ << PrettySize(max_capacity);
+
+ return jit_code_cache.release();
}
-JitCodeCache::JitCodeCache(MemMap&& data_pages,
- MemMap&& exec_pages,
- MemMap&& non_exec_pages,
- size_t initial_data_capacity,
- size_t initial_exec_capacity,
- size_t max_capacity)
+JitCodeCache::JitCodeCache()
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
- data_pages_(std::move(data_pages)),
- exec_pages_(std::move(exec_pages)),
- non_exec_pages_(std::move(non_exec_pages)),
- max_capacity_(max_capacity),
- current_capacity_(initial_exec_capacity + initial_data_capacity),
- data_end_(initial_data_capacity),
- exec_end_(initial_exec_capacity),
last_collection_increased_code_cache_(false),
garbage_collect_code_(true),
used_memory_for_data_(0),
@@ -406,10 +422,31 @@ JitCodeCache::JitCodeCache(MemMap&& data_pages,
histogram_code_memory_use_("Memory used for compiled code", 16),
histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
is_weak_access_enabled_(true),
- inline_cache_cond_("Jit inline cache condition variable", lock_) {
+ inline_cache_cond_("Jit inline cache condition variable", lock_),
+ zygote_data_pages_(),
+ zygote_exec_pages_(),
+ zygote_data_mspace_(nullptr),
+ zygote_exec_mspace_(nullptr) {
+}
- DCHECK_GE(max_capacity, initial_exec_capacity + initial_data_capacity);
+void JitCodeCache::InitializeState(size_t initial_capacity, size_t max_capacity) {
+ CHECK_GE(max_capacity, initial_capacity);
+ CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
+ // Align both capacities to page size, as that's the unit mspaces use.
+ initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
+ max_capacity = RoundDown(max_capacity, 2 * kPageSize);
+ data_pages_ = MemMap();
+ exec_pages_ = MemMap();
+ non_exec_pages_ = MemMap();
+ initial_capacity_ = initial_capacity;
+ max_capacity_ = max_capacity;
+ current_capacity_ = initial_capacity;
+ data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
+ exec_end_ = initial_capacity - data_end_;
+}
+
+void JitCodeCache::InitializeSpaces() {
// Initialize the data heap
data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
@@ -427,19 +464,14 @@ JitCodeCache::JitCodeCache(MemMap&& data_pages,
CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
- SetFootprintLimit(current_capacity_);
+ SetFootprintLimit(initial_capacity_);
// Protect pages containing heap metadata. Updates to the code heap toggle write permission to
// perform the update and there are no other times write access is required.
CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
} else {
exec_mspace_ = nullptr;
- SetFootprintLimit(current_capacity_);
+ SetFootprintLimit(initial_capacity_);
}
-
- VLOG(jit) << "Created jit code cache: initial data size="
- << PrettySize(initial_data_capacity)
- << ", initial code size="
- << PrettySize(initial_exec_capacity);
}
JitCodeCache::~JitCodeCache() {}
@@ -861,7 +893,8 @@ void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
}
}
-static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
+static void ClearMethodCounter(ArtMethod* method, bool was_warm)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (was_warm) {
method->SetPreviouslyWarm();
}
@@ -1097,7 +1130,7 @@ bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
return false;
}
- method->ClearCounter();
+ method->SetCounter(0);
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, GetQuickToInterpreterBridge());
VLOG(jit)
@@ -1272,31 +1305,6 @@ size_t JitCodeCache::ReserveData(Thread* self,
}
}
-class MarkCodeVisitor final : public StackVisitor {
- public:
- MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in, CodeCacheBitmap* bitmap)
- : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
- code_cache_(code_cache_in),
- bitmap_(bitmap) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- if (method_header == nullptr) {
- return true;
- }
- const void* code = method_header->GetCode();
- if (code_cache_->ContainsPc(code)) {
- // Use the atomic set version, as multiple threads are executing this code.
- bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
- }
- return true;
- }
-
- private:
- JitCodeCache* const code_cache_;
- CodeCacheBitmap* const bitmap_;
-};
-
class MarkCodeClosure final : public Closure {
public:
MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier)
@@ -1305,8 +1313,24 @@ class MarkCodeClosure final : public Closure {
void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK(thread == Thread::Current() || thread->IsSuspended());
- MarkCodeVisitor visitor(thread, code_cache_, bitmap_);
- visitor.WalkStack();
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) {
+ const OatQuickMethodHeader* method_header =
+ stack_visitor->GetCurrentOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return true;
+ }
+ const void* code = method_header->GetCode();
+ if (code_cache_->ContainsPc(code)) {
+ // Use the atomic set version, as multiple threads are executing this code.
+ bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
+ }
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kSkipInlinedFrames);
+
if (kIsDebugBuild) {
// The stack walking code queries the side instrumentation stack if it
// sees an instrumentation exit pc, so the JIT code of methods in that stack
@@ -1339,13 +1363,13 @@ void JitCodeCache::NotifyCollectionDone(Thread* self) {
}
void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
- size_t per_space_footprint = new_footprint / 2;
- DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
- DCHECK_EQ(per_space_footprint * 2, new_footprint);
- mspace_set_footprint_limit(data_mspace_, per_space_footprint);
+ size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
+ DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
+ DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
+ mspace_set_footprint_limit(data_mspace_, data_space_footprint);
if (HasCodeMapping()) {
ScopedCodeCacheWrite scc(this);
- mspace_set_footprint_limit(exec_mspace_, per_space_footprint);
+ mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
}
}
@@ -2066,5 +2090,33 @@ void JitCodeCache::Dump(std::ostream& os) {
histogram_profiling_info_memory_use_.PrintMemoryUse(os);
}
+void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
+ MutexLock mu(Thread::Current(), lock_);
+ // Currently, we don't expect any compilations from zygote.
+ CHECK_EQ(number_of_compilations_, 0u);
+ CHECK_EQ(number_of_osr_compilations_, 0u);
+ CHECK(jni_stubs_map_.empty());
+ CHECK(method_code_map_.empty());
+ CHECK(osr_code_map_.empty());
+
+ zygote_data_pages_ = std::move(data_pages_);
+ zygote_exec_pages_ = std::move(exec_pages_);
+ zygote_data_mspace_ = data_mspace_;
+ zygote_exec_mspace_ = exec_mspace_;
+
+ size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
+ size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
+
+ InitializeState(initial_capacity, max_capacity);
+
+ std::string error_msg;
+ if (!InitializeMappings(/* rwx_memory_allowed= */ !is_system_server, is_zygote, &error_msg)) {
+ LOG(WARNING) << "Could not reset JIT state after zygote fork: " << error_msg;
+ return;
+ }
+
+ InitializeSpaces();
+}
+
} // namespace jit
} // namespace art
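
For context, the dual-view scheme that InitializeMappings() sets up above boils down to mapping one memfd twice: a shared writable view the compiler writes through, and a shared read-execute view the runtime jumps into. The following standalone sketch is illustrative only — it assumes a libc that declares memfd_create(2) in <sys/mman.h>, none of the names are ART code, and SELinux policy may still forbid executable memfd mappings in some domains:

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t capacity = 64 * 1024;
  int fd = memfd_create("jit-cache-demo", 0);   // anonymous in-memory file
  if (fd < 0 || ftruncate(fd, capacity) != 0) {
    std::perror("memfd_create/ftruncate");
    return 1;
  }
  // Writable view: code would be emitted through this mapping.
  void* rw = mmap(nullptr, capacity, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  // Read-execute view of the same pages: execution happens through this one.
  void* rx = mmap(nullptr, capacity, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
  if (rw == MAP_FAILED || rx == MAP_FAILED) {
    std::perror("mmap");
    return 1;
  }
  // A byte stored through rw is visible through rx without any mapping ever
  // being writable and executable at the same time.
  static_cast<unsigned char*>(rw)[0] = 0xC3;    // e.g. an x86 'ret'
  std::printf("rw=%p rx=%p byte via rx=0x%02x\n",
              rw, rx, static_cast<unsigned>(static_cast<unsigned char*>(rx)[0]));
  close(fd);
  return 0;
}

A real code cache additionally needs instruction-cache maintenance before running freshly written code (hence the membarrier sync-core registration in Create()); the sketch deliberately never jumps into rx.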
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a5075638f2..7a838fddd6 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -89,10 +89,9 @@ class JitCodeCache {
// Create the code cache with a code + data capacity equal to "capacity", error message is passed
// in the out arg error_msg.
- static JitCodeCache* Create(size_t initial_capacity,
- size_t max_capacity,
- bool used_only_for_profile_data,
+ static JitCodeCache* Create(bool used_only_for_profile_data,
bool rwx_memory_allowed,
+ bool is_zygote,
std::string* error_msg);
~JitCodeCache();
@@ -262,14 +261,17 @@ class JitCodeCache {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void PostForkChildAction(bool is_system_server, bool is_zygote);
+
private:
- // Take ownership of maps.
- JitCodeCache(MemMap&& data_pages,
- MemMap&& exec_pages,
- MemMap&& non_exec_pages,
- size_t initial_data_capacity,
- size_t initial_exec_capacity,
- size_t max_capacity);
+ JitCodeCache();
+
+ void InitializeState(size_t initial_capacity, size_t max_capacity) REQUIRES(lock_);
+
+ bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
+ REQUIRES(lock_);
+
+ void InitializeSpaces() REQUIRES(lock_);
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
@@ -421,6 +423,9 @@ class JitCodeCache {
// ProfilingInfo objects we have allocated.
std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
+ // The initial capacity in bytes this code cache starts with.
+ size_t initial_capacity_ GUARDED_BY(lock_);
+
// The maximum capacity in bytes this code cache can go to.
size_t max_capacity_ GUARDED_BY(lock_);
@@ -471,10 +476,19 @@ class JitCodeCache {
// Condition to wait on for accessing inline caches.
ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+ // Mem map which holds zygote data (stack maps and profiling info).
+ MemMap zygote_data_pages_;
+ // Mem map which holds zygote code and has executable permission.
+ MemMap zygote_exec_pages_;
+ // The opaque mspace for allocating zygote data.
+ void* zygote_data_mspace_ GUARDED_BY(lock_);
+ // The opaque mspace for allocating zygote code.
+ void* zygote_exec_mspace_ GUARDED_BY(lock_);
+
friend class art::JitJniStubTestHelper;
friend class ScopedCodeCacheWrite;
- DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
+ DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};
} // namespace jit
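
The data/code split that InitializeState() and SetFootprintLimit() derive from kCodeAndDataCapacityDivider can be summarized with the arithmetic below. The divider's value is an assumption here (2, matching the half-and-half split the replaced code used), and the helper names are illustrative rather than part of the patch:

#include <cassert>
#include <cstddef>

// Assumed value: the hunks above reference the symbol but do not show its definition.
constexpr size_t kCodeAndDataCapacityDivider = 2;

constexpr size_t DataCapacity(size_t capacity) {
  return capacity / kCodeAndDataCapacityDivider;   // stack maps, profiling info
}
constexpr size_t ExecCapacity(size_t capacity) {
  return capacity - DataCapacity(capacity);        // compiled code
}

int main() {
  constexpr size_t capacity = 64 * 1024 * 1024;    // a 64 MiB cache
  static_assert(DataCapacity(capacity) + ExecCapacity(capacity) == capacity,
                "the two mspaces must cover the whole footprint");
  assert(DataCapacity(capacity) == 32u * 1024 * 1024);
  assert(ExecCapacity(capacity) == 32u * 1024 * 1024);
  return 0;
}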
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index e3248eaf24..c8d4728589 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -362,7 +362,7 @@ static void SampleClassesAndExecutedMethods(pthread_t profiler_pthread,
}
// Visit all of the methods in the class to see which ones were executed.
for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
- if (!method.IsNative()) {
+ if (!method.IsNative() && !method.IsAbstract()) {
DCHECK(!method.IsProxyMethod());
const uint16_t counter = method.GetCounter();
// Mark startup methods as hot if they have more than hot_method_sample_threshold
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index df2a8e29cb..647928391b 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -277,43 +277,6 @@ void Monitor::SetObject(mirror::Object* object) {
obj_ = GcRoot<mirror::Object>(object);
}
-// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
-
-struct NthCallerWithDexPcVisitor final : public StackVisitor {
- explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_(nullptr),
- dex_pc_(0),
- current_frame_number_(0),
- wanted_frame_number_(frame) {}
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m == nullptr || m->IsRuntimeMethod()) {
- // Runtime method, upcall, or resolution issue. Skip.
- return true;
- }
-
- // Is this the requested frame?
- if (current_frame_number_ == wanted_frame_number_) {
- method_ = m;
- dex_pc_ = GetDexPc(/* abort_on_failure=*/ false);
- return false;
- }
-
- // Look for more.
- current_frame_number_++;
- return true;
- }
-
- ArtMethod* method_;
- uint32_t dex_pc_;
-
- private:
- size_t current_frame_number_;
- const size_t wanted_frame_number_;
-};
-
// This function is inlined and just helps to not have the VLOG and ATRACE check at all the
// potential tracing points.
void Monitor::AtraceMonitorLock(Thread* self, mirror::Object* obj, bool is_wait) {
@@ -326,13 +289,41 @@ void Monitor::AtraceMonitorLockImpl(Thread* self, mirror::Object* obj, bool is_w
// Wait() requires a deeper call stack to be useful. Otherwise you'll see "Waiting at
// Object.java". Assume that we'll wait a nontrivial amount, so it's OK to do a longer
// stack walk than if !is_wait.
- NthCallerWithDexPcVisitor visitor(self, is_wait ? 1U : 0U);
- visitor.WalkStack(false);
+ const size_t wanted_frame_number = is_wait ? 1U : 0U;
+
+ ArtMethod* method = nullptr;
+ uint32_t dex_pc = 0u;
+
+ size_t current_frame_number = 0u;
+ StackVisitor::WalkStack(
+ // Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ if (m == nullptr || m->IsRuntimeMethod()) {
+ // Runtime method, upcall, or resolution issue. Skip.
+ return true;
+ }
+
+ // Is this the requested frame?
+ if (current_frame_number == wanted_frame_number) {
+ method = m;
+ dex_pc = stack_visitor->GetDexPc(/* abort_on_failure= */ false);
+ return false;
+ }
+
+ // Look for more.
+ current_frame_number++;
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+
const char* prefix = is_wait ? "Waiting on " : "Locking ";
const char* filename;
int32_t line_number;
- TranslateLocation(visitor.method_, visitor.dex_pc_, &filename, &line_number);
+ TranslateLocation(method, dex_pc, &filename, &line_number);
// It would be nice to have a stable "ID" for the object here. However, the only stable thing
// would be the identity hashcode. But we cannot use IdentityHashcode here: For one, there are
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 56e9094983..530371d4c4 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -29,6 +29,7 @@
#include "debugger.h"
#include "hidden_api.h"
#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "native_util.h"
@@ -292,7 +293,10 @@ static void ZygoteHooks_nativePostForkSystemServer(JNIEnv* env ATTRIBUTE_UNUSED,
// System server has a window where it can create executable pages for this purpose, but this is
// turned off after this hook. Consequently, the only JIT mode supported is the dual-view JIT
// where one mapping is R->RW and the other is RX. Single view requires RX->RWX->RX.
- Runtime::Current()->CreateJitCodeCache(/*rwx_memory_allowed=*/false);
+ if (Runtime::Current()->GetJit() != nullptr) {
+ Runtime::Current()->GetJit()->GetCodeCache()->PostForkChildAction(
+ /* is_system_server= */ true, /* is_zygote= */ false);
+ }
}
static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
@@ -332,6 +336,15 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
}
Runtime::Current()->GetHeap()->PostForkChildAction(thread);
+ if (Runtime::Current()->GetJit() != nullptr) {
+ if (!is_system_server) {
+ // System server already called the JIT cache post-fork action in `nativePostForkSystemServer`.
+ Runtime::Current()->GetJit()->GetCodeCache()->PostForkChildAction(
+ /* is_system_server= */ false, is_zygote);
+ }
+ // This must be called after EnableDebugFeatures.
+ Runtime::Current()->GetJit()->PostForkChildAction();
+ }
// Update tracing.
if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) {
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 33c85973b3..29b569050c 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -586,26 +586,7 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options,
args.Set(M::BackgroundGc, BackgroundGcOption { background_collector_type_ });
}
- // If a reference to the dalvik core.jar snuck in, replace it with
- // the art specific version. This can happen with on device
- // boot.art/boot.oat generation by GenerateImage which relies on the
- // value of BOOTCLASSPATH.
-#if defined(ART_TARGET)
- std::string core_jar("/core.jar");
- std::string core_libart_jar("/core-libart.jar");
-#else
- // The host uses hostdex files.
- std::string core_jar("/core-hostdex.jar");
- std::string core_libart_jar("/core-libart-hostdex.jar");
-#endif
auto boot_class_path_string = args.GetOrDefault(M::BootClassPath);
-
- size_t core_jar_pos = boot_class_path_string.find(core_jar);
- if (core_jar_pos != std::string::npos) {
- boot_class_path_string.replace(core_jar_pos, core_jar.size(), core_libart_jar);
- args.Set(M::BootClassPath, boot_class_path_string);
- }
-
{
auto&& boot_class_path = args.GetOrDefault(M::BootClassPath);
auto&& boot_class_path_locations = args.GetOrDefault(M::BootClassPathLocations);
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index afdfefaffa..d4e3d54a99 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -154,46 +154,36 @@ class CatchBlockStackVisitor final : public StackVisitor {
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
};
-// Counts instrumentation stack frame prior to catch handler or upcall.
-class InstrumentationStackVisitor : public StackVisitor {
- public:
- InstrumentationStackVisitor(Thread* self, size_t frame_depth)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- frame_depth_(frame_depth),
- instrumentation_frames_to_pop_(0) {
- CHECK_NE(frame_depth_, kInvalidFrameDepth);
- }
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- size_t current_frame_depth = GetFrameDepth();
- if (current_frame_depth < frame_depth_) {
- CHECK(GetMethod() != nullptr);
- if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == GetReturnPc())) {
- if (!IsInInlinedFrame()) {
- // We do not count inlined frames, because we do not instrument them. The reason we
- // include them in the stack walking is the check against `frame_depth_`, which is
- // given to us by a visitor that visits inlined frames.
- ++instrumentation_frames_to_pop_;
+static size_t GetInstrumentationFramesToPop(Thread* self, size_t frame_depth)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK_NE(frame_depth, kInvalidFrameDepth);
+ size_t instrumentation_frames_to_pop = 0;
+ StackVisitor::WalkStack(
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ size_t current_frame_depth = stack_visitor->GetFrameDepth();
+ if (current_frame_depth < frame_depth) {
+ CHECK(stack_visitor->GetMethod() != nullptr);
+ if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) ==
+ stack_visitor->GetReturnPc())) {
+ if (!stack_visitor->IsInInlinedFrame()) {
+ // We do not count inlined frames, because we do not instrument them. The reason we
+ // include them in the stack walking is the check against `frame_depth_`, which is
+ // given to us by a visitor that visits inlined frames.
+ ++instrumentation_frames_to_pop;
+ }
+ }
+ return true;
}
- }
- return true;
- } else {
- // We reached the frame of the catch handler or the upcall.
- return false;
- }
- }
-
- size_t GetInstrumentationFramesToPop() const {
- return instrumentation_frames_to_pop_;
- }
-
- private:
- const size_t frame_depth_;
- size_t instrumentation_frames_to_pop_;
-
- DISALLOW_COPY_AND_ASSIGN(InstrumentationStackVisitor);
-};
+ // We reached the frame of the catch handler or the upcall.
+ return false;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ /* check_suspended= */ true,
+ /* include_transitions= */ true);
+ return instrumentation_frames_to_pop;
+}
// Finds the appropriate exception catch after calling all method exit instrumentation functions.
// Note that this might change the exception being thrown.
@@ -229,9 +219,8 @@ void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) {
// Figure out how many of those frames have instrumentation we need to remove (Should be the
// exact same as number of new_pop_count if there aren't inlined frames).
- InstrumentationStackVisitor instrumentation_visitor(self_, handler_frame_depth_);
- instrumentation_visitor.WalkStack(true);
- size_t instrumentation_frames_to_pop = instrumentation_visitor.GetInstrumentationFramesToPop();
+ size_t instrumentation_frames_to_pop =
+ GetInstrumentationFramesToPop(self_, handler_frame_depth_);
if (kDebugExceptionDelivery) {
if (*handler_quick_frame_ == nullptr) {
@@ -647,10 +636,8 @@ uintptr_t QuickExceptionHandler::UpdateInstrumentationStack() {
DCHECK(is_deoptimization_) << "Non-deoptimization handlers should use FindCatch";
uintptr_t return_pc = 0;
if (method_tracing_active_) {
- InstrumentationStackVisitor visitor(self_, handler_frame_depth_);
- visitor.WalkStack(true);
-
- size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
+ size_t instrumentation_frames_to_pop =
+ GetInstrumentationFramesToPop(self_, handler_frame_depth_);
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
return_pc = instrumentation->PopFramesForDeoptimization(self_, instrumentation_frames_to_pop);
}
@@ -671,53 +658,41 @@ void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) {
UNREACHABLE();
}
-// Prints out methods with their type of frame.
-class DumpFramesWithTypeStackVisitor final : public StackVisitor {
- public:
- explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- show_details_(show_details) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* method = GetMethod();
- if (show_details_) {
- LOG(INFO) << "|> pc = " << std::hex << GetCurrentQuickFramePc();
- LOG(INFO) << "|> addr = " << std::hex << reinterpret_cast<uintptr_t>(GetCurrentQuickFrame());
- if (GetCurrentQuickFrame() != nullptr && method != nullptr) {
- LOG(INFO) << "|> ret = " << std::hex << GetReturnPc();
- }
- }
- if (method == nullptr) {
- // Transition, do go on, we want to unwind over bridges, all the way.
- if (show_details_) {
- LOG(INFO) << "N <transition>";
- }
- return true;
- } else if (method->IsRuntimeMethod()) {
- if (show_details_) {
- LOG(INFO) << "R " << method->PrettyMethod(true);
- }
- return true;
- } else {
- bool is_shadow = GetCurrentShadowFrame() != nullptr;
- LOG(INFO) << (is_shadow ? "S" : "Q")
- << ((!is_shadow && IsInInlinedFrame()) ? "i" : " ")
- << " "
- << method->PrettyMethod(true);
- return true; // Go on.
- }
- }
-
- private:
- bool show_details_;
-
- DISALLOW_COPY_AND_ASSIGN(DumpFramesWithTypeStackVisitor);
-};
-
void QuickExceptionHandler::DumpFramesWithType(Thread* self, bool details) {
- DumpFramesWithTypeStackVisitor visitor(self, details);
- visitor.WalkStack(true);
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* method = stack_visitor->GetMethod();
+ if (details) {
+ LOG(INFO) << "|> pc = " << std::hex << stack_visitor->GetCurrentQuickFramePc();
+ LOG(INFO) << "|> addr = " << std::hex
+ << reinterpret_cast<uintptr_t>(stack_visitor->GetCurrentQuickFrame());
+ if (stack_visitor->GetCurrentQuickFrame() != nullptr && method != nullptr) {
+ LOG(INFO) << "|> ret = " << std::hex << stack_visitor->GetReturnPc();
+ }
+ }
+ if (method == nullptr) {
+ // Transition, do go on, we want to unwind over bridges, all the way.
+ if (details) {
+ LOG(INFO) << "N <transition>";
+ }
+ return true;
+ } else if (method->IsRuntimeMethod()) {
+ if (details) {
+ LOG(INFO) << "R " << method->PrettyMethod(true);
+ }
+ return true;
+ } else {
+ bool is_shadow = stack_visitor->GetCurrentShadowFrame() != nullptr;
+ LOG(INFO) << (is_shadow ? "S" : "Q")
+ << ((!is_shadow && stack_visitor->IsInInlinedFrame()) ? "i" : " ")
+ << " "
+ << method->PrettyMethod(true);
+ return true; // Go on.
+ }
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
}
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f016e874ca..9cbbd4172d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -801,6 +801,8 @@ bool Runtime::Start() {
if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
}
+ CreateJitCodeCache(/*rwx_memory_allowed=*/true);
+ CreateJit();
}
// Send the start phase event. We have to wait till here as this is when the main thread peer
@@ -904,15 +906,8 @@ void Runtime::InitNonZygoteOrPostFork(
}
}
- if (jit_ == nullptr) {
- // The system server's code cache was initialized specially. For other zygote forks or
- // processes create it now.
- if (!is_system_server) {
- CreateJitCodeCache(/*rwx_memory_allowed=*/true);
- }
- // Note that when running ART standalone (not zygote, nor zygote fork),
- // the jit may have already been created.
- CreateJit();
+ if (jit_ != nullptr) {
+ jit_->CreateThreadPool();
}
// Create the thread pools.
@@ -961,127 +956,12 @@ void Runtime::StartDaemonThreads() {
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
-// Attempts to open dex files from image(s). Given the image location, try to find the oat file
-// and open it to get the stored dex file. If the image is the first for a multi-image boot
-// classpath, go on and also open the other images.
-static bool OpenDexFilesFromImage(const std::string& image_location,
- std::vector<std::unique_ptr<const DexFile>>* dex_files,
- size_t* failures) {
- DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
-
- // Use a work-list approach, so that we can easily reuse the opening code.
- std::vector<std::string> image_locations;
- image_locations.push_back(image_location);
-
- for (size_t index = 0; index < image_locations.size(); ++index) {
- std::string system_filename;
- bool has_system = false;
- std::string cache_filename_unused;
- bool dalvik_cache_exists_unused;
- bool has_cache_unused;
- bool is_global_cache_unused;
- bool found_image = gc::space::ImageSpace::FindImageFilename(image_locations[index].c_str(),
- kRuntimeISA,
- &system_filename,
- &has_system,
- &cache_filename_unused,
- &dalvik_cache_exists_unused,
- &has_cache_unused,
- &is_global_cache_unused);
-
- if (!found_image || !has_system) {
- return false;
- }
-
- // We are falling back to non-executable use of the oat file because patching failed, presumably
- // due to lack of space.
- std::string vdex_filename =
- ImageHeader::GetVdexLocationFromImageLocation(system_filename.c_str());
- std::string oat_filename =
- ImageHeader::GetOatLocationFromImageLocation(system_filename.c_str());
- std::string oat_location =
- ImageHeader::GetOatLocationFromImageLocation(image_locations[index].c_str());
- // Note: in the multi-image case, the image location may end in ".jar," and not ".art." Handle
- // that here.
- if (android::base::EndsWith(oat_location, ".jar")) {
- oat_location.replace(oat_location.length() - 3, 3, "oat");
- }
- std::string error_msg;
-
- std::unique_ptr<VdexFile> vdex_file(VdexFile::Open(vdex_filename,
- /* writable= */ false,
- /* low_4gb= */ false,
- /* unquicken= */ false,
- &error_msg));
- if (vdex_file.get() == nullptr) {
- return false;
- }
-
- std::unique_ptr<File> file(OS::OpenFileForReading(oat_filename.c_str()));
- if (file.get() == nullptr) {
- return false;
- }
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
- /* writable= */ false,
- /* program_header_only= */ false,
- /* low_4gb= */ false,
- &error_msg));
- if (elf_file.get() == nullptr) {
- return false;
- }
- std::unique_ptr<const OatFile> oat_file(
- OatFile::OpenWithElfFile(/* zip_fd= */ -1,
- elf_file.release(),
- vdex_file.release(),
- oat_location,
- nullptr,
- &error_msg));
- if (oat_file == nullptr) {
- LOG(WARNING) << "Unable to use '" << oat_filename << "' because " << error_msg;
- return false;
- }
-
- for (const OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
- if (oat_dex_file == nullptr) {
- *failures += 1;
- continue;
- }
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
- if (dex_file.get() == nullptr) {
- *failures += 1;
- } else {
- dex_files->push_back(std::move(dex_file));
- }
- }
-
- if (index == 0) {
- // First file. See if this is a multi-image environment, and if so, enqueue the other images.
- const OatHeader& boot_oat_header = oat_file->GetOatHeader();
- const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
- if (boot_cp != nullptr) {
- gc::space::ImageSpace::ExtractMultiImageLocations(image_locations[0],
- boot_cp,
- &image_locations);
- }
- }
-
- Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
- }
- return true;
-}
-
-
static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
const std::vector<std::string>& dex_locations,
- const std::string& image_location,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
size_t failure_count = 0;
- if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
- return failure_count;
- }
const ArtDexFileLoader dex_file_loader;
- failure_count = 0;
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i].c_str();
const char* dex_location = dex_locations[i].c_str();
@@ -1527,10 +1407,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
if (runtime_options.Exists(Opt::BootClassPathDexList)) {
boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
} else {
- OpenDexFiles(dex_filenames,
- dex_locations,
- runtime_options.GetOrDefault(Opt::Image),
- &boot_class_path);
+ OpenDexFiles(dex_filenames, dex_locations, &boot_class_path);
}
instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
@@ -2503,16 +2380,11 @@ void Runtime::CreateJitCodeCache(bool rwx_memory_allowed) {
return;
}
- // SystemServer has execmem blocked by SELinux so can not use RWX page permissions after the
- // cache initialized.
- jit_options_->SetRWXMemoryAllowed(rwx_memory_allowed);
-
std::string error_msg;
bool profiling_only = !jit_options_->UseJitCompilation();
- jit_code_cache_.reset(jit::JitCodeCache::Create(jit_options_->GetCodeCacheInitialCapacity(),
- jit_options_->GetCodeCacheMaxCapacity(),
- profiling_only,
- jit_options_->RWXMemoryAllowed(),
+ jit_code_cache_.reset(jit::JitCodeCache::Create(profiling_only,
+ rwx_memory_allowed,
+ IsZygote(),
&error_msg));
if (jit_code_cache_.get() == nullptr) {
LOG(WARNING) << "Failed to create JIT Code Cache: " << error_msg;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 3c057f3c41..0ccc7b79bf 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -634,13 +634,6 @@ class Runtime {
void DeoptimizeBootImage();
bool IsNativeDebuggable() const {
- CHECK(!is_zygote_ || IsAotCompiler());
- return is_native_debuggable_;
- }
-
- // Note: prefer not to use this method, but the checked version above. The separation exists
- // as the runtime state may change for a zygote child.
- bool IsNativeDebuggableZygoteOK() const {
return is_native_debuggable_;
}
@@ -698,7 +691,6 @@ class Runtime {
double GetHashTableMaxLoadFactor() const;
bool IsSafeMode() const {
- CHECK(!is_zygote_);
return safe_mode_;
}
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index 3f5e7762f6..39b44e7872 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -23,7 +23,6 @@
#include <vector>
#include "arch/instruction_set.h"
-#include "base/logging.h"
#include "base/variant_map.h"
#include "cmdline_types.h" // TODO: don't need to include this file here
#include "gc/collector_type.h"
diff --git a/runtime/stack.h b/runtime/stack.h
index 02578d25b7..9d30115bb1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -143,6 +143,36 @@ class StackVisitor {
template <CountTransitions kCount = CountTransitions::kYes>
void WalkStack(bool include_transitions = false) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Convenience helper function to walk the stack with a lambda as a visitor.
+ template <CountTransitions kCountTransitions = CountTransitions::kYes,
+ typename T>
+ ALWAYS_INLINE static void WalkStack(const T& fn,
+ Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended = true,
+ bool include_transitions = false)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ class LambdaStackVisitor : public StackVisitor {
+ public:
+ LambdaStackVisitor(const T& fn,
+ Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended = true)
+ : StackVisitor(thread, context, walk_kind, check_suspended), fn_(fn) {}
+
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+ return fn_(this);
+ }
+
+ private:
+ T fn_;
+ };
+ LambdaStackVisitor visitor(fn, thread, context, walk_kind, check_suspended);
+ visitor.template WalkStack<kCountTransitions>(include_transitions);
+ }
+
Thread* GetThread() const {
return thread_;
}
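
The new WalkStack overload above is just an adapter: it wraps whatever callable it is given in a one-off StackVisitor subclass whose VisitFrame() forwards to the callable, which is what lets the call sites in this change replace hand-written visitor classes with lambdas. Reduced to a self-contained toy with no ART types (Frame, Walker, and WalkWith are illustrative stand-ins), the pattern is:

#include <iostream>
#include <vector>

struct Frame { int depth; };

// Stand-in for StackVisitor: subclasses override VisitFrame(); returning
// false stops the walk.
class Walker {
 public:
  explicit Walker(const std::vector<Frame>& frames) : frames_(frames) {}
  virtual ~Walker() = default;
  virtual bool VisitFrame(const Frame& frame) = 0;
  void WalkStack() {
    for (const Frame& frame : frames_) {
      if (!VisitFrame(frame)) {
        break;
      }
    }
  }
 private:
  const std::vector<Frame>& frames_;
};

// Stand-in for the new StackVisitor::WalkStack(fn, ...): adapts any callable
// into a Walker subclass so call sites do not have to write one by hand.
template <typename T>
void WalkWith(const std::vector<Frame>& frames, const T& fn) {
  class LambdaWalker : public Walker {
   public:
    LambdaWalker(const std::vector<Frame>& frames, const T& fn)
        : Walker(frames), fn_(fn) {}
    bool VisitFrame(const Frame& frame) override { return fn_(frame); }
   private:
    T fn_;
  };
  LambdaWalker walker(frames, fn);
  walker.WalkStack();
}

int main() {
  std::vector<Frame> frames = {{0}, {1}, {2}, {3}};
  int visited = 0;
  WalkWith(frames, [&](const Frame& frame) {
    ++visited;
    return frame.depth < 2;  // stop once depth 2 is reached
  });
  std::cout << "visited " << visited << " frames\n";  // prints 3
  return 0;
}

Keeping the callable's exact type as a template parameter rather than erasing it with std::function mirrors the header's choice and lets the per-frame call be inlined.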
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e9fed76d6f..33cd9bbb67 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3607,42 +3607,34 @@ Context* Thread::GetLongJumpContext() {
return result;
}
-ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
+ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out,
bool check_suspended,
bool abort_on_error) const {
// Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java
// stack.
- struct CurrentMethodVisitor final : public StackVisitor {
- CurrentMethodVisitor(Thread* thread, bool check_suspended, bool abort_on_error)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread,
- /* context= */nullptr,
- StackVisitor::StackWalkKind::kIncludeInlinedFrames,
- check_suspended),
- method_(nullptr),
- dex_pc_(0),
- abort_on_error_(abort_on_error) {}
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m->IsRuntimeMethod()) {
- // Continue if this is a runtime method.
- return true;
- }
- method_ = m;
- dex_pc_ = GetDexPc(abort_on_error_);
- return false;
- }
- ArtMethod* method_;
- uint32_t dex_pc_;
- const bool abort_on_error_;
- };
- CurrentMethodVisitor visitor(const_cast<Thread*>(this), check_suspended, abort_on_error);
- visitor.WalkStack(false);
- if (dex_pc != nullptr) {
- *dex_pc = visitor.dex_pc_;
+ ArtMethod* method = nullptr;
+ uint32_t dex_pc = dex::kDexNoIndex;
+ StackVisitor::WalkStack(
+ [&](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = visitor->GetMethod();
+ if (m->IsRuntimeMethod()) {
+ // Continue if this is a runtime method.
+ return true;
+ }
+ method = m;
+ dex_pc = visitor->GetDexPc(abort_on_error);
+ return false;
+ },
+ const_cast<Thread*>(this),
+ /* context= */ nullptr,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended);
+
+ if (dex_pc_out != nullptr) {
+ *dex_pc_out = dex_pc;
}
- return visitor.method_;
+ return method;
}
bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index 97c39bb3db..033c8acc39 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -20,6 +20,7 @@
#include "nativehelper/scoped_local_ref.h"
#include "nativeloader/native_loader.h"
+#include "base/logging.h"
#include "base/strlcpy.h"
#include "jni/java_vm_ext.h"
#include "runtime.h"
diff --git a/runtime/ti/agent.h b/runtime/ti/agent.h
index faf76a1334..598c8ffe57 100644
--- a/runtime/ti/agent.h
+++ b/runtime/ti/agent.h
@@ -22,7 +22,8 @@
#include <memory>
-#include "base/logging.h"
+#include <android-base/logging.h>
+#include <android-base/macros.h>
namespace art {
namespace ti {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index ad58c2ea99..f6c36cf989 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -58,32 +58,6 @@ static constexpr uint8_t kOpNewMethod = 1U;
static constexpr uint8_t kOpNewThread = 2U;
static constexpr uint8_t kOpTraceSummary = 3U;
-class BuildStackTraceVisitor : public StackVisitor {
- public:
- explicit BuildStackTraceVisitor(Thread* thread)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_trace_(Trace::AllocStackTrace()) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- // Ignore runtime frames (in particular callee save).
- if (!m->IsRuntimeMethod()) {
- method_trace_->push_back(m);
- }
- return true;
- }
-
- // Returns a stack trace where the topmost frame corresponds with the first element of the vector.
- std::vector<ArtMethod*>* GetStackTrace() const {
- return method_trace_;
- }
-
- private:
- std::vector<ArtMethod*>* const method_trace_;
-
- DISALLOW_COPY_AND_ASSIGN(BuildStackTraceVisitor);
-};
-
static const char kTraceTokenChar = '*';
static const uint16_t kTraceHeaderLength = 32;
static const uint32_t kTraceMagicValue = 0x574f4c53;
@@ -228,9 +202,19 @@ static void Append8LE(uint8_t* buf, uint64_t val) {
}
static void GetSample(Thread* thread, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
- BuildStackTraceVisitor build_trace_visitor(thread);
- build_trace_visitor.WalkStack();
- std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
+ std::vector<ArtMethod*>* const stack_trace = Trace::AllocStackTrace();
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ // Ignore runtime frames (in particular callee save).
+ if (!m->IsRuntimeMethod()) {
+ stack_trace->push_back(m);
+ }
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
Trace* the_trace = reinterpret_cast<Trace*>(arg);
the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}
diff --git a/test/021-string2/src/Main.java b/test/021-string2/src/Main.java
index c713aa43a6..141a08983b 100644
--- a/test/021-string2/src/Main.java
+++ b/test/021-string2/src/Main.java
@@ -15,14 +15,13 @@
*/
import junit.framework.Assert;
-import java.lang.reflect.Method;
import java.util.Locale;
/**
* more string tests
*/
public class Main {
- public static void main(String args[]) throws Exception {
+ public static void main(String args[]) {
String test = "0123456789";
String test1 = new String("0123456789"); // different object
String test2 = new String("0123456780"); // different value
@@ -86,9 +85,7 @@ public class Main {
Assert.assertEquals("this is a path", test.replaceAll("/", " "));
Assert.assertEquals("this is a path", test.replace("/", " "));
- Class<?> Strings = Class.forName("com.android.org.bouncycastle.util.Strings");
- Method fromUTF8ByteArray = Strings.getDeclaredMethod("fromUTF8ByteArray", byte[].class);
- String result = (String) fromUTF8ByteArray.invoke(null, new byte[] {'O', 'K'});
+ String result = new String(new char[] { 'O', 'K' });
System.out.println(result);
testCompareToAndEquals();
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 985d27309e..0cb220e49f 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -30,6 +30,7 @@
#include <backtrace/Backtrace.h>
#include "base/file_utils.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/utils.h"
#include "gc/heap.h"
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index ddc86dffa4..817a647805 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -25,62 +25,50 @@ namespace art {
namespace {
-class TestVisitor : public StackVisitor {
- public:
- TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- this_value_(this_value),
- found_method_index_(0) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name.compare("$noinline$testThisWithInstanceCall") == 0) {
- found_method_index_ = 1;
- uint32_t value = 0;
- CHECK(GetVReg(m, 1, kReferenceVReg, &value));
- CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
- CHECK_EQ(GetThisObject(), this_value_);
- } else if (m_name.compare("$noinline$testThisWithStaticCall") == 0) {
- found_method_index_ = 2;
- uint32_t value = 0;
- CHECK(GetVReg(m, 1, kReferenceVReg, &value));
- } else if (m_name.compare("$noinline$testParameter") == 0) {
- found_method_index_ = 3;
- uint32_t value = 0;
- CHECK(GetVReg(m, 1, kReferenceVReg, &value));
- } else if (m_name.compare("$noinline$testObjectInScope") == 0) {
- found_method_index_ = 4;
- uint32_t value = 0;
- CHECK(GetVReg(m, 0, kReferenceVReg, &value));
- }
-
- return true;
- }
+jint FindMethodIndex(jobject this_value_jobj) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<Context> context(Context::Create());
+ ObjPtr<mirror::Object> this_value = soa.Decode<mirror::Object>(this_value_jobj);
+ jint found_method_index = 0;
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ std::string m_name(m->GetName());
- mirror::Object* this_value_;
+ if (m_name.compare("$noinline$testThisWithInstanceCall") == 0) {
+ found_method_index = 1;
+ uint32_t value = 0;
+ CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value);
+ CHECK_EQ(stack_visitor->GetThisObject(), this_value);
+ } else if (m_name.compare("$noinline$testThisWithStaticCall") == 0) {
+ found_method_index = 2;
+ uint32_t value = 0;
+ CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+ } else if (m_name.compare("$noinline$testParameter") == 0) {
+ found_method_index = 3;
+ uint32_t value = 0;
+ CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+ } else if (m_name.compare("$noinline$testObjectInScope") == 0) {
+ found_method_index = 4;
+ uint32_t value = 0;
+ CHECK(stack_visitor->GetVReg(m, 0, kReferenceVReg, &value));
+ }
- // Value returned to Java to ensure the methods testSimpleVReg and testPairVReg
- // have been found and tested.
- jint found_method_index_;
-};
+ return true;
+ },
+ soa.Self(),
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return found_method_index;
+}
extern "C" JNIEXPORT jint JNICALL Java_Main_doNativeCallRef(JNIEnv*, jobject value) {
- ScopedObjectAccess soa(Thread::Current());
- std::unique_ptr<Context> context(Context::Create());
- TestVisitor visitor(soa.Self(), context.get(), soa.Decode<mirror::Object>(value).Ptr());
- visitor.WalkStack();
- return visitor.found_method_index_;
+ return FindMethodIndex(value);
}
extern "C" JNIEXPORT jint JNICALL Java_Main_doStaticNativeCallRef(JNIEnv*, jclass) {
- ScopedObjectAccess soa(Thread::Current());
- std::unique_ptr<Context> context(Context::Create());
- TestVisitor visitor(soa.Self(), context.get(), nullptr);
- visitor.WalkStack();
- return visitor.found_method_index_;
+ return FindMethodIndex(nullptr);
}
} // namespace
diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc
index 165f5bf412..1885f8d9f5 100644
--- a/test/543-env-long-ref/env_long_ref.cc
+++ b/test/543-env-long-ref/env_long_ref.cc
@@ -23,44 +23,28 @@
namespace art {
-namespace {
-
-class TestVisitor : public StackVisitor {
- public:
- TestVisitor(const ScopedObjectAccess& soa, Context* context, jobject expected_value)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(soa.Self(), context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- expected_value_(expected_value),
- found_(false),
- soa_(soa) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name == "testCase") {
- found_ = true;
- uint32_t value = 0;
- CHECK(GetVReg(m, 1, kReferenceVReg, &value));
- CHECK_EQ(reinterpret_cast<mirror::Object*>(value),
- soa_.Decode<mirror::Object>(expected_value_).Ptr());
- }
- return true;
- }
-
- jobject expected_value_;
- bool found_;
- const ScopedObjectAccess& soa_;
-};
-
-} // namespace
-
extern "C" JNIEXPORT void JNICALL Java_Main_lookForMyRegisters(JNIEnv*, jclass, jobject value) {
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<Context> context(Context::Create());
- TestVisitor visitor(soa, context.get(), value);
- visitor.WalkStack();
- CHECK(visitor.found_);
+ bool found = false;
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ std::string m_name(m->GetName());
+
+ if (m_name == "testCase") {
+ found = true;
+ uint32_t stack_value = 0;
+ CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &stack_value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(stack_value),
+ soa.Decode<mirror::Object>(value).Ptr());
+ }
+ return true;
+ },
+ soa.Self(),
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ CHECK(found);
}
} // namespace art
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index 7b88842fcc..b2b363447f 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -23,39 +23,33 @@
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "stack_map.h"
+#include "thread-current-inl.h"
namespace art {
-class OsrVisitor : public StackVisitor {
- public:
- explicit OsrVisitor(Thread* thread, const char* method_name)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_name_(method_name),
- in_osr_method_(false),
- in_interpreter_(false) {}
+namespace {
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name.compare(method_name_) == 0) {
- const OatQuickMethodHeader* header =
- Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
- if (header != nullptr && header == GetCurrentOatQuickMethodHeader()) {
- in_osr_method_ = true;
- } else if (IsShadowFrame()) {
- in_interpreter_ = true;
- }
- return false;
- }
- return true;
- }
+template <typename Handler>
+void ProcessMethodWithName(JNIEnv* env, jstring method_name, const Handler& handler) {
+ ScopedUtfChars chars(env, method_name);
+ CHECK(chars.c_str() != nullptr);
+ ScopedObjectAccess soa(Thread::Current());
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::string m_name(stack_visitor->GetMethod()->GetName());
+
+ if (m_name.compare(chars.c_str()) == 0) {
+ handler(stack_visitor);
+ return false;
+ }
+ return true;
+ },
+ soa.Self(),
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+}
- const char* const method_name_;
- bool in_osr_method_;
- bool in_interpreter_;
-};
+} // namespace
extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInOsrCode(JNIEnv* env,
jclass,
@@ -65,12 +59,19 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInOsrCode(JNIEnv* env,
// Just return true for non-jit configurations to stop the infinite loop.
return JNI_TRUE;
}
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- OsrVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
- return visitor.in_osr_method_;
+ bool in_osr_code = false;
+ ProcessMethodWithName(
+ env,
+ method_name,
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ const OatQuickMethodHeader* header =
+ Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
+ if (header != nullptr && header == stack_visitor->GetCurrentOatQuickMethodHeader()) {
+ in_osr_code = true;
+ }
+ });
+ return in_osr_code;
}
extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInInterpreter(JNIEnv* env,
@@ -80,86 +81,56 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInInterpreter(JNIEnv* env,
// The return value is irrelevant if we're not using JIT.
return false;
}
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- OsrVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
- return visitor.in_interpreter_;
+ bool in_interpreter = false;
+ ProcessMethodWithName(
+ env,
+ method_name,
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ const OatQuickMethodHeader* header =
+ Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
+ if ((header == nullptr || header != stack_visitor->GetCurrentOatQuickMethodHeader()) &&
+ stack_visitor->IsShadowFrame()) {
+ in_interpreter = true;
+ }
+ });
+ return in_interpreter;
}
-class ProfilingInfoVisitor : public StackVisitor {
- public:
- explicit ProfilingInfoVisitor(Thread* thread, const char* method_name)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_name_(method_name) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name.compare(method_name_) == 0) {
- ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
- return false;
- }
- return true;
- }
-
- const char* const method_name_;
-};
-
extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasProfilingInfo(JNIEnv* env,
jclass,
jstring method_name) {
if (!Runtime::Current()->UseJitCompilation()) {
return;
}
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfoVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
+ ProcessMethodWithName(
+ env,
+ method_name,
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
+ });
}
-class OsrCheckVisitor : public StackVisitor {
- public:
- OsrCheckVisitor(Thread* thread, const char* method_name)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_name_(method_name) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (m_name.compare(method_name_) == 0) {
- while (jit->GetCodeCache()->LookupOsrMethodHeader(m) == nullptr) {
- // Sleep to yield to the compiler thread.
- usleep(1000);
- // Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(m, Thread::Current(), /* osr */ true);
- }
- return false;
- }
- return true;
- }
-
- const char* const method_name_;
-};
-
extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv* env,
jclass,
jstring method_name) {
if (!Runtime::Current()->UseJitCompilation()) {
return;
}
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- OsrCheckVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
+ ProcessMethodWithName(
+ env,
+ method_name,
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ while (jit->GetCodeCache()->LookupOsrMethodHeader(m) == nullptr) {
+ // Sleep to yield to the compiler thread.
+ usleep(1000);
+ // Will either ensure it's compiled or do the compilation itself.
+ jit->CompileMethod(m, Thread::Current(), /* osr */ true);
+ }
+ });
}
} // namespace art
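Note on the pattern used throughout the osr.cc hunks above: each per-use StackVisitor subclass is replaced by a single StackVisitor::WalkStack call taking a lambda, the thread, an optional Context, and a walk kind, where the lambda returns true to keep walking and false to stop. The following is a minimal consolidated sketch of that shape, assuming the test's existing includes (plus <functional> for the alias); the helper name VisitNamedFrame and the FrameHandler alias are illustrative, not part of the change.

using FrameHandler = std::function<void(const art::StackVisitor*)>;

// Walk the current thread's stack and run |handler| on the first frame whose
// method name matches |method_name|; stop the walk once it has been found.
static void VisitNamedFrame(JNIEnv* env, jstring method_name, const FrameHandler& handler) {
  ScopedUtfChars chars(env, method_name);
  CHECK(chars.c_str() != nullptr);
  ScopedObjectAccess soa(Thread::Current());
  art::StackVisitor::WalkStack(
      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
        std::string m_name(stack_visitor->GetMethod()->GetName());
        if (m_name.compare(chars.c_str()) == 0) {
          handler(stack_visitor);
          return false;  // Match found: stop walking.
        }
        return true;  // Keep walking towards older frames.
      },
      soa.Self(),
      /* context= */ nullptr,
      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
}

Each of the three JNI entry points above then reduces to a call to such a helper, with only the frame-specific logic left in the lambda, as the hunks show.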
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index 581aa74d4e..393e773275 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -66,42 +66,30 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedAt(JNIEnv* env,
// public static native boolean isInterpretedFunction(String smali);
-// TODO Remove 'allow_runtime_frames' option once we have deoptimization through runtime frames.
-struct MethodIsInterpretedVisitor : public StackVisitor {
- public:
- MethodIsInterpretedVisitor(Thread* thread, ArtMethod* goal, bool require_deoptable)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- goal_(goal),
- method_is_interpreted_(true),
- method_found_(false),
- prev_was_runtime_(true),
- require_deoptable_(require_deoptable) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- if (goal_ == GetMethod()) {
- method_is_interpreted_ = (require_deoptable_ && prev_was_runtime_) || IsShadowFrame();
- method_found_ = true;
- return false;
- }
- prev_was_runtime_ = GetMethod()->IsRuntimeMethod();
- return true;
- }
-
- bool IsInterpreted() {
- return method_is_interpreted_;
- }
-
- bool IsFound() {
- return method_found_;
- }
-
- private:
- const ArtMethod* goal_;
- bool method_is_interpreted_;
- bool method_found_;
- bool prev_was_runtime_;
- bool require_deoptable_;
-};
+static bool IsMethodInterpreted(Thread* self,
+ const ArtMethod* goal,
+ const bool require_deoptable,
+ /* out */ bool* method_is_interpreted)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ *method_is_interpreted = true;
+ bool method_found = false;
+ bool prev_was_runtime = true;
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (goal == stack_visitor->GetMethod()) {
+ *method_is_interpreted =
+ (require_deoptable && prev_was_runtime) || stack_visitor->IsShadowFrame();
+ method_found = true;
+ return false;
+ }
+ prev_was_runtime = stack_visitor->GetMethod()->IsRuntimeMethod();
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return method_found;
+}
// TODO Remove 'require_deoptimizable' option once we have deoptimization through runtime frames.
extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedFunction(
@@ -119,23 +107,18 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedFunction(
env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to interpret method argument!");
return JNI_FALSE;
}
- bool result;
- bool found;
{
ScopedObjectAccess soa(env);
ArtMethod* goal = jni::DecodeArtMethod(id);
- MethodIsInterpretedVisitor v(soa.Self(), goal, require_deoptimizable);
- v.WalkStack();
+ bool is_interpreted;
+ if (!IsMethodInterpreted(soa.Self(), goal, require_deoptimizable, &is_interpreted)) {
+ env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to find given method in stack!");
+ return JNI_FALSE;
+ }
bool enters_interpreter = Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(
goal->GetEntryPointFromQuickCompiledCode());
- result = (v.IsInterpreted() || enters_interpreter);
- found = v.IsFound();
- }
- if (!found) {
- env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to find given method in stack!");
- return JNI_FALSE;
+ return (is_interpreted || enters_interpreter);
}
- return result;
}
// public static native void assertIsInterpreted();
@@ -196,24 +179,24 @@ extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsManaged(JNIEnv* env, j
}
}
-struct GetCallingFrameVisitor : public StackVisitor {
- GetCallingFrameVisitor(Thread* thread, Context* context)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
-
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- // Discard stubs and Main.getThisOfCaller.
- return GetMethod() == nullptr || GetMethod()->IsNative();
- }
-};
-
extern "C" JNIEXPORT jobject JNICALL Java_Main_getThisOfCaller(
JNIEnv* env, jclass cls ATTRIBUTE_UNUSED) {
ScopedObjectAccess soa(env);
std::unique_ptr<art::Context> context(art::Context::Create());
- GetCallingFrameVisitor visitor(soa.Self(), context.get());
- visitor.WalkStack();
- return soa.AddLocalReference<jobject>(visitor.GetThisObject());
+ jobject result = nullptr;
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Discard stubs and Main.getThisOfCaller.
+ if (stack_visitor->GetMethod() == nullptr || stack_visitor->GetMethod()->IsNative()) {
+ return true;
+ }
+ result = soa.AddLocalReference<jobject>(stack_visitor->GetThisObject());
+ return false;
+ },
+ soa.Self(),
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return result;
}
} // namespace art
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 7f4c546df5..7c93a8b283 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -1060,7 +1060,7 @@ class HiddenApi final {
// Write into public/private API files.
std::ofstream file_public(out_public_path_.c_str());
std::ofstream file_private(out_private_path_.c_str());
- for (const std::pair<std::string, bool> entry : boot_members) {
+ for (const std::pair<const std::string, bool>& entry : boot_members) {
if (entry.second) {
file_public << entry.first << std::endl;
} else {