summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build/apex/Android.bp4
-rw-r--r--build/apex/com.android.runtime.avbpubkey (renamed from build/apex/runtime.avbpubkey)bin1032 -> 1032 bytes
-rw-r--r--build/apex/com.android.runtime.pem (renamed from build/apex/runtime.pem)0
-rw-r--r--compiler/jit/jit_compiler.cc125
-rw-r--r--compiler/jit/jit_compiler.h8
-rw-r--r--compiler/optimizing/intrinsics_x86.cc107
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc99
-rw-r--r--dex2oat/dex2oat.cc13
-rw-r--r--dex2oat/dex2oat_test.cc59
-rw-r--r--dex2oat/linker/image_test.cc16
-rw-r--r--dex2oat/linker/image_test.h1
-rw-r--r--dex2oat/linker/image_writer.cc54
-rw-r--r--dex2oat/linker/image_writer.h9
-rw-r--r--dex2oat/linker/oat_writer.cc5
-rw-r--r--dexlayout/dexlayout.cc26
-rw-r--r--dt_fd_forward/Android.bp1
-rw-r--r--dt_fd_forward/dt_fd_forward.cc4
-rw-r--r--libartbase/Android.bp1
-rw-r--r--libartbase/base/hiddenapi_flags.cc26
-rw-r--r--libartbase/base/hiddenapi_flags.h131
-rw-r--r--libdexfile/dex/class_accessor-inl.h5
-rw-r--r--libdexfile/dex/dex_file.h1
-rw-r--r--libdexfile/dex/dex_file_layout.cc1
-rw-r--r--libdexfile/dex/dex_file_verifier.cc2
-rw-r--r--libdexfile/dex/hidden_api_access_flags.h80
-rw-r--r--libdexfile/dex/test_dex_file_builder.h1
-rw-r--r--oatdump/oatdump.cc4
-rw-r--r--openjdkjvmti/ti_logging.cc7
-rw-r--r--openjdkjvmti/ti_stack.cc90
-rw-r--r--runtime/art_field-inl.h4
-rw-r--r--runtime/art_field.h4
-rw-r--r--runtime/art_method-inl.h26
-rw-r--r--runtime/art_method.h30
-rw-r--r--runtime/base/mutex.h8
-rw-r--r--runtime/class_linker-inl.h4
-rw-r--r--runtime/class_linker.cc282
-rw-r--r--runtime/class_linker.h4
-rw-r--r--runtime/debugger.cc530
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h8
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc2
-rw-r--r--runtime/gc/accounting/mod_union_table_test.cc2
-rw-r--r--runtime/gc/allocation_record.cc47
-rw-r--r--runtime/gc/collector/immune_spaces_test.cc18
-rw-r--r--runtime/gc/space/image_space.cc300
-rw-r--r--runtime/hidden_api.cc26
-rw-r--r--runtime/hidden_api.h10
-rw-r--r--runtime/hidden_api_test.cc38
-rw-r--r--runtime/image.cc6
-rw-r--r--runtime/image.h60
-rw-r--r--runtime/instrumentation.cc121
-rw-r--r--runtime/interpreter/interpreter_cache.h1
-rw-r--r--runtime/interpreter/mterp/arm/arithmetic.S60
-rw-r--r--runtime/interpreter/mterp/arm/array.S4
-rw-r--r--runtime/interpreter/mterp/arm/control_flow.S2
-rw-r--r--runtime/interpreter/mterp/arm/floating_point.S46
-rw-r--r--runtime/interpreter/mterp/arm/main.S19
-rw-r--r--runtime/interpreter/mterp/arm/object.S4
-rw-r--r--runtime/interpreter/mterp/arm/other.S22
-rw-r--r--runtime/interpreter/mterp/arm64/floating_point.S89
-rw-r--r--runtime/interpreter/mterp/arm64/main.S15
-rw-r--r--runtime/interpreter/mterp/x86/arithmetic.S52
-rw-r--r--runtime/interpreter/mterp/x86/floating_point.S10
-rw-r--r--runtime/interpreter/mterp/x86/main.S13
-rw-r--r--runtime/interpreter/mterp/x86_64/arithmetic.S54
-rw-r--r--runtime/interpreter/mterp/x86_64/floating_point.S14
-rw-r--r--runtime/interpreter/mterp/x86_64/main.S13
-rw-r--r--runtime/jit/jit.cc87
-rw-r--r--runtime/jit/jit.h23
-rw-r--r--runtime/jit/jit_code_cache.cc308
-rw-r--r--runtime/jit/jit_code_cache.h36
-rw-r--r--runtime/jit/profile_saver.cc2
-rw-r--r--runtime/monitor.cc71
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc15
-rw-r--r--runtime/parsed_options.cc19
-rw-r--r--runtime/quick_exception_handler.cc159
-rw-r--r--runtime/runtime.cc144
-rw-r--r--runtime/runtime.h8
-rw-r--r--runtime/stack.h30
-rw-r--r--runtime/thread.cc52
-rw-r--r--runtime/trace.cc42
-rw-r--r--runtime/vdex_file.cc1
-rw-r--r--test/021-string2/src/Main.java7
-rw-r--r--test/461-get-reference-vreg/get_reference_vreg_jni.cc86
-rw-r--r--test/543-env-long-ref/env_long_ref.cc54
-rw-r--r--test/570-checker-osr/osr.cc167
-rw-r--r--test/674-hiddenapi/hiddenapi.cc3
-rw-r--r--test/common/stack_inspect.cc105
-rw-r--r--tools/hiddenapi/hiddenapi.cc17
-rw-r--r--tools/hiddenapi/hiddenapi_test.cc56
-rw-r--r--tools/veridex/hidden_api.h16
-rw-r--r--tools/veridex/hidden_api_finder.cc8
-rw-r--r--tools/veridex/precise_hidden_api_finder.cc5
-rw-r--r--tools/veridex/veridex.cc8
93 files changed, 2213 insertions, 2144 deletions
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index bfaacb10fa..159e5c1992 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -68,8 +68,8 @@ art_tools_host_binaries = [
apex_key {
name: "com.android.runtime.key",
- public_key: "runtime.avbpubkey",
- private_key: "runtime.pem",
+ public_key: "com.android.runtime.avbpubkey",
+ private_key: "com.android.runtime.pem",
}
// TODO: Introduce `apex_defaults` to factor common parts of `apex`
diff --git a/build/apex/runtime.avbpubkey b/build/apex/com.android.runtime.avbpubkey
index b0ffc9b11f..b0ffc9b11f 100644
--- a/build/apex/runtime.avbpubkey
+++ b/build/apex/com.android.runtime.avbpubkey
Binary files differ
diff --git a/build/apex/runtime.pem b/build/apex/com.android.runtime.pem
index 4c7ce4b30b..4c7ce4b30b 100644
--- a/build/apex/runtime.pem
+++ b/build/apex/com.android.runtime.pem
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index bb35065921..0eab8356e7 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -26,7 +26,6 @@
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
-#include "base/unix_file/fd_file.h"
#include "debug/elf_debug_writer.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -34,11 +33,6 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_logger.h"
-#include "oat_file-inl.h"
-#include "oat_quick_method_header.h"
-#include "object_lock.h"
-#include "optimizing/register_allocator.h"
-#include "thread_list.h"
namespace art {
namespace jit {
@@ -47,46 +41,7 @@ JitCompiler* JitCompiler::Create() {
return new JitCompiler();
}
-extern "C" void* jit_load(bool* generate_debug_info) {
- VLOG(jit) << "loading jit compiler";
- auto* const jit_compiler = JitCompiler::Create();
- CHECK(jit_compiler != nullptr);
- *generate_debug_info = jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
- VLOG(jit) << "Done loading jit compiler";
- return jit_compiler;
-}
-
-extern "C" void jit_unload(void* handle) {
- DCHECK(handle != nullptr);
- delete reinterpret_cast<JitCompiler*>(handle);
-}
-
-extern "C" bool jit_compile_method(
- void* handle, ArtMethod* method, Thread* self, bool osr)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
- DCHECK(jit_compiler != nullptr);
- return jit_compiler->CompileMethod(self, method, osr);
-}
-
-extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
- DCHECK(jit_compiler != nullptr);
- const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
- if (compiler_options.GetGenerateDebugInfo()) {
- const ArrayRef<mirror::Class*> types_array(types, count);
- std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
- kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
- MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
- // We never free debug info for types, so we don't need to provide a handle
- // (which would have been otherwise used as identifier to remove it later).
- AddNativeDebugInfoForJit(nullptr /* handle */, elf_file);
- }
-}
-
-JitCompiler::JitCompiler() {
- compiler_options_.reset(new CompilerOptions());
+void JitCompiler::ParseCompilerOptions() {
// Special case max code units for inlining, whose default is "unset" (implictly
// meaning no limit). Do this before parsing the actual passed options.
compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
@@ -94,8 +49,8 @@ JitCompiler::JitCompiler() {
{
std::string error_msg;
if (!compiler_options_->ParseCompilerOptions(runtime->GetCompilerOptions(),
- /*ignore_unrecognized=*/ true,
- &error_msg)) {
+ /*ignore_unrecognized=*/ true,
+ &error_msg)) {
LOG(FATAL) << error_msg;
UNREACHABLE();
}
@@ -103,8 +58,11 @@ JitCompiler::JitCompiler() {
// JIT is never PIC, no matter what the runtime compiler options specify.
compiler_options_->SetNonPic();
- // Set debuggability based on the runtime value.
- compiler_options_->SetDebuggable(runtime->IsJavaDebuggable());
+ // If the options don't provide whether we generate debuggable code, set
+ // debuggability based on the runtime value.
+ if (!compiler_options_->GetDebuggable()) {
+ compiler_options_->SetDebuggable(runtime->IsJavaDebuggable());
+ }
const InstructionSet instruction_set = compiler_options_->GetInstructionSet();
if (kRuntimeISA == InstructionSet::kArm) {
@@ -148,6 +106,65 @@ JitCompiler::JitCompiler() {
compiler_options_->compiling_with_core_image_ =
CompilerDriver::IsCoreImageFilename(runtime->GetImageLocation());
+ if (compiler_options_->GetGenerateDebugInfo()) {
+ jit_logger_.reset(new JitLogger());
+ jit_logger_->OpenLog();
+ }
+}
+
+extern "C" void* jit_load() {
+ VLOG(jit) << "Create jit compiler";
+ auto* const jit_compiler = JitCompiler::Create();
+ CHECK(jit_compiler != nullptr);
+ VLOG(jit) << "Done creating jit compiler";
+ return jit_compiler;
+}
+
+extern "C" void jit_unload(void* handle) {
+ DCHECK(handle != nullptr);
+ delete reinterpret_cast<JitCompiler*>(handle);
+}
+
+extern "C" bool jit_compile_method(
+ void* handle, ArtMethod* method, Thread* self, bool osr)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ return jit_compiler->CompileMethod(self, method, osr);
+}
+
+extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
+ if (compiler_options.GetGenerateDebugInfo()) {
+ const ArrayRef<mirror::Class*> types_array(types, count);
+ std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
+ kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
+ // We never free debug info for types, so we don't need to provide a handle
+ // (which would have been otherwise used as identifier to remove it later).
+ AddNativeDebugInfoForJit(nullptr /* handle */, elf_file);
+ }
+}
+
+extern "C" void jit_update_options(void* handle) {
+ JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ jit_compiler->ParseCompilerOptions();
+}
+
+extern "C" bool jit_generate_debug_info(void* handle) {
+ JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ return jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
+}
+
+JitCompiler::JitCompiler() {
+ compiler_options_.reset(new CompilerOptions());
+ ParseCompilerOptions();
+
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(),
/* verification_results */ nullptr,
@@ -157,14 +174,6 @@ JitCompiler::JitCompiler() {
/* swap_fd */ -1));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
-
- size_t thread_count = compiler_driver_->GetThreadCount();
- if (compiler_options_->GetGenerateDebugInfo()) {
- DCHECK_EQ(thread_count, 1u)
- << "Generating debug info only works with one compiler thread";
- jit_logger_.reset(new JitLogger());
- jit_logger_->OpenLog();
- }
}
JitCompiler::~JitCompiler() {
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 5840fece2e..d201611d79 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -43,10 +43,13 @@ class JitCompiler {
const CompilerOptions& GetCompilerOptions() const {
return *compiler_options_.get();
}
+
CompilerDriver* GetCompilerDriver() const {
return compiler_driver_.get();
}
+ void ParseCompilerOptions();
+
private:
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<CompilerDriver> compiler_driver_;
@@ -54,11 +57,6 @@ class JitCompiler {
JitCompiler();
- // This is in the compiler since the runtime doesn't have access to the compiled method
- // structures.
- bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 6dd4681847..a73f4e8b94 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -545,6 +545,96 @@ static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorX86* codegen, QuickEntry
__ cfi().AdjustCFAOffset(-16);
}
+static void CreateLowestOneBitLocations(ArenaAllocator* allocator, bool is_long, HInvoke* invoke) {
+ LocationSummary* locations =
+ new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+ if (is_long) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ } else {
+ locations->SetInAt(0, Location::Any());
+ }
+ locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
+}
+
+static void GenLowestOneBit(X86Assembler* assembler,
+ CodeGeneratorX86* codegen,
+ bool is_long,
+ HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Location src = locations->InAt(0);
+ Location out_loc = locations->Out();
+
+ if (invoke->InputAt(0)->IsConstant()) {
+ // Evaluate this at compile time.
+ int64_t value = Int64FromConstant(invoke->InputAt(0)->AsConstant());
+ if (value == 0) {
+ if (is_long) {
+ __ xorl(out_loc.AsRegisterPairLow<Register>(), out_loc.AsRegisterPairLow<Register>());
+ __ xorl(out_loc.AsRegisterPairHigh<Register>(), out_loc.AsRegisterPairHigh<Register>());
+ } else {
+ __ xorl(out_loc.AsRegister<Register>(), out_loc.AsRegister<Register>());
+ }
+ return;
+ }
+ // Nonzero value.
+ value = is_long ? CTZ(static_cast<uint64_t>(value))
+ : CTZ(static_cast<uint32_t>(value));
+ if (is_long) {
+ if (value >= 32) {
+ int shift = value-32;
+ codegen->Load32BitValue(out_loc.AsRegisterPairLow<Register>(), 0);
+ codegen->Load32BitValue(out_loc.AsRegisterPairHigh<Register>(), 1 << shift);
+ } else {
+ codegen->Load32BitValue(out_loc.AsRegisterPairLow<Register>(), 1 << value);
+ codegen->Load32BitValue(out_loc.AsRegisterPairHigh<Register>(), 0);
+ }
+ } else {
+ codegen->Load32BitValue(out_loc.AsRegister<Register>(), 1 << value);
+ }
+ return;
+ }
+ // Handle non constant case
+ if (is_long) {
+ DCHECK(src.IsRegisterPair());
+ Register src_lo = src.AsRegisterPairLow<Register>();
+ Register src_hi = src.AsRegisterPairHigh<Register>();
+
+ Register out_lo = out_loc.AsRegisterPairLow<Register>();
+ Register out_hi = out_loc.AsRegisterPairHigh<Register>();
+
+ __ movl(out_lo, src_lo);
+ __ movl(out_hi, src_hi);
+
+ __ negl(out_lo);
+ __ adcl(out_hi, Immediate(0));
+ __ negl(out_hi);
+
+ __ andl(out_lo, src_lo);
+ __ andl(out_hi, src_hi);
+ } else {
+ if (codegen->GetInstructionSetFeatures().HasAVX2() && src.IsRegister()) {
+ Register out = out_loc.AsRegister<Register>();
+ __ blsi(out, src.AsRegister<Register>());
+ } else {
+ Register out = out_loc.AsRegister<Register>();
+ // Do tmp & -tmp
+ if (src.IsRegister()) {
+ __ movl(out, src.AsRegister<Register>());
+ } else {
+ DCHECK(src.IsStackSlot());
+ __ movl(out, Address(ESP, src.GetStackIndex()));
+ }
+ __ negl(out);
+
+ if (src.IsRegister()) {
+ __ andl(out, src.AsRegister<Register>());
+ } else {
+ __ andl(out, Address(ESP, src.GetStackIndex()));
+ }
+ }
+ }
+}
+
void IntrinsicLocationsBuilderX86::VisitMathCos(HInvoke* invoke) {
CreateFPToFPCallLocations(allocator_, invoke);
}
@@ -657,6 +747,21 @@ void IntrinsicCodeGeneratorX86::VisitMathTanh(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickTanh);
}
+void IntrinsicLocationsBuilderX86::VisitIntegerLowestOneBit(HInvoke* invoke) {
+ CreateLowestOneBitLocations(allocator_, /*is_long=*/ false, invoke);
+}
+void IntrinsicCodeGeneratorX86::VisitIntegerLowestOneBit(HInvoke* invoke) {
+ GenLowestOneBit(GetAssembler(), codegen_, /*is_long=*/ false, invoke);
+}
+
+void IntrinsicLocationsBuilderX86::VisitLongLowestOneBit(HInvoke* invoke) {
+ CreateLowestOneBitLocations(allocator_, /*is_long=*/ true, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitLongLowestOneBit(HInvoke* invoke) {
+ GenLowestOneBit(GetAssembler(), codegen_, /*is_long=*/ true, invoke);
+}
+
static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
LocationSummary* locations =
new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
@@ -2965,8 +3070,6 @@ UNIMPLEMENTED_INTRINSIC(X86, FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, DoubleIsInfinite)
UNIMPLEMENTED_INTRINSIC(X86, IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(X86, LongHighestOneBit)
-UNIMPLEMENTED_INTRINSIC(X86, IntegerLowestOneBit)
-UNIMPLEMENTED_INTRINSIC(X86, LongLowestOneBit)
UNIMPLEMENTED_INTRINSIC(X86, CRC32Update)
UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7db26dc9be..88c766fabc 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2413,59 +2413,64 @@ static void GenOneBit(X86_64Assembler* assembler,
}
// Handle the non-constant cases.
- CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
- if (is_high) {
- // Use architectural support: basically 1 << bsr.
- if (src.IsRegister()) {
+ if (!is_high && codegen->GetInstructionSetFeatures().HasAVX2() &&
+ src.IsRegister()) {
+ __ blsi(out, src.AsRegister<CpuRegister>());
+ } else {
+ CpuRegister tmp = locations->GetTemp(0).AsRegister<CpuRegister>();
+ if (is_high) {
+ // Use architectural support: basically 1 << bsr.
+ if (src.IsRegister()) {
+ if (is_long) {
+ __ bsrq(tmp, src.AsRegister<CpuRegister>());
+ } else {
+ __ bsrl(tmp, src.AsRegister<CpuRegister>());
+ }
+ } else if (is_long) {
+ DCHECK(src.IsDoubleStackSlot());
+ __ bsrq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ } else {
+ DCHECK(src.IsStackSlot());
+ __ bsrl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ }
+ // BSR sets ZF if the input was zero.
+ NearLabel is_zero, done;
+ __ j(kEqual, &is_zero);
+ __ movl(out, Immediate(1)); // Clears upper bits too.
if (is_long) {
- __ bsrq(tmp, src.AsRegister<CpuRegister>());
+ __ shlq(out, tmp);
} else {
- __ bsrl(tmp, src.AsRegister<CpuRegister>());
+ __ shll(out, tmp);
}
- } else if (is_long) {
- DCHECK(src.IsDoubleStackSlot());
- __ bsrq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- } else {
- DCHECK(src.IsStackSlot());
- __ bsrl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- }
- // BSR sets ZF if the input was zero.
- NearLabel is_zero, done;
- __ j(kEqual, &is_zero);
- __ movl(out, Immediate(1)); // Clears upper bits too.
- if (is_long) {
- __ shlq(out, tmp);
- } else {
- __ shll(out, tmp);
- }
- __ jmp(&done);
- __ Bind(&is_zero);
- __ xorl(out, out); // Clears upper bits too.
- __ Bind(&done);
- } else {
- // Copy input into temporary.
- if (src.IsRegister()) {
+ __ jmp(&done);
+ __ Bind(&is_zero);
+ __ xorl(out, out); // Clears upper bits too.
+ __ Bind(&done);
+ } else {
+ // Copy input into temporary.
+ if (src.IsRegister()) {
+ if (is_long) {
+ __ movq(tmp, src.AsRegister<CpuRegister>());
+ } else {
+ __ movl(tmp, src.AsRegister<CpuRegister>());
+ }
+ } else if (is_long) {
+ DCHECK(src.IsDoubleStackSlot());
+ __ movq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ } else {
+ DCHECK(src.IsStackSlot());
+ __ movl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
+ }
+ // Do the bit twiddling: basically tmp & -tmp;
if (is_long) {
- __ movq(tmp, src.AsRegister<CpuRegister>());
+ __ movq(out, tmp);
+ __ negq(tmp);
+ __ andq(out, tmp);
} else {
- __ movl(tmp, src.AsRegister<CpuRegister>());
+ __ movl(out, tmp);
+ __ negl(tmp);
+ __ andl(out, tmp);
}
- } else if (is_long) {
- DCHECK(src.IsDoubleStackSlot());
- __ movq(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- } else {
- DCHECK(src.IsStackSlot());
- __ movl(tmp, Address(CpuRegister(RSP), src.GetStackIndex()));
- }
- // Do the bit twiddling: basically tmp & -tmp;
- if (is_long) {
- __ movq(out, tmp);
- __ negq(tmp);
- __ andq(out, tmp);
- } else {
- __ movl(out, tmp);
- __ negl(tmp);
- __ andl(out, tmp);
}
}
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index b634598407..3a24542221 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1930,7 +1930,7 @@ class Dex2Oat final {
// ImageWriter, if necessary.
// Note: Flushing (and closing) the file is the caller's responsibility, except for the failure
// case (when the file will be explicitly erased).
- bool WriteOutputFiles() {
+ bool WriteOutputFiles(jobject class_loader) {
TimingLogger::ScopedTiming t("dex2oat Oat", timings_);
// Sync the data to the file, in case we did dex2dex transformations.
@@ -1965,6 +1965,7 @@ class Dex2Oat final {
image_storage_mode_,
oat_filenames_,
dex_file_oat_index_map_,
+ class_loader,
dirty_image_objects_.get()));
// We need to prepare method offsets in the image address space for direct method patching.
@@ -2839,11 +2840,12 @@ class ScopedGlobalRef {
static dex2oat::ReturnCode CompileImage(Dex2Oat& dex2oat) {
dex2oat.LoadClassProfileDescriptors();
+ jobject class_loader = dex2oat.Compile();
// Keep the class loader that was used for compilation live for the rest of the compilation
// process.
- ScopedGlobalRef class_loader(dex2oat.Compile());
+ ScopedGlobalRef global_ref(class_loader);
- if (!dex2oat.WriteOutputFiles()) {
+ if (!dex2oat.WriteOutputFiles(class_loader)) {
dex2oat.EraseOutputFiles();
return dex2oat::ReturnCode::kOther;
}
@@ -2883,11 +2885,12 @@ static dex2oat::ReturnCode CompileImage(Dex2Oat& dex2oat) {
}
static dex2oat::ReturnCode CompileApp(Dex2Oat& dex2oat) {
+ jobject class_loader = dex2oat.Compile();
// Keep the class loader that was used for compilation live for the rest of the compilation
// process.
- ScopedGlobalRef class_loader(dex2oat.Compile());
+ ScopedGlobalRef global_ref(class_loader);
- if (!dex2oat.WriteOutputFiles()) {
+ if (!dex2oat.WriteOutputFiles(class_loader)) {
dex2oat.EraseOutputFiles();
return dex2oat::ReturnCode::kOther;
}
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 1fa21d51fc..97a5f2453e 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -1069,7 +1069,8 @@ class Dex2oatClassLoaderContextTest : public Dex2oatTest {
void RunTest(const char* class_loader_context,
const char* expected_classpath_key,
bool expected_success,
- bool use_second_source = false) {
+ bool use_second_source = false,
+ bool generate_image = false) {
std::string dex_location = GetUsedDexLocation();
std::string odex_location = GetUsedOatLocation();
@@ -1080,6 +1081,9 @@ class Dex2oatClassLoaderContextTest : public Dex2oatTest {
if (class_loader_context != nullptr) {
extra_args.push_back(std::string("--class-loader-context=") + class_loader_context);
}
+ if (generate_image) {
+ extra_args.push_back(std::string("--app-image-file=") + GetUsedImageLocation());
+ }
auto check_oat = [expected_classpath_key](const OatFile& oat_file) {
ASSERT_TRUE(expected_classpath_key != nullptr);
const char* classpath = oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey);
@@ -1104,6 +1108,10 @@ class Dex2oatClassLoaderContextTest : public Dex2oatTest {
return GetOdexDir() + "/Context.odex";
}
+ std::string GetUsedImageLocation() {
+ return GetOdexDir() + "/Context.art";
+ }
+
const char* kEmptyClassPathKey = "PCL[]";
};
@@ -1213,6 +1221,55 @@ TEST_F(Dex2oatClassLoaderContextTest, ContextWithSharedLibrary) {
RunTest(context.c_str(), expected_classpath_key.c_str(), true);
}
+TEST_F(Dex2oatClassLoaderContextTest, ContextWithSharedLibraryAndImage) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Nested");
+ std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+
+ std::string context = "PCL[" + GetTestDexFileName("Nested") + "]" +
+ "{PCL[" + GetTestDexFileName("MultiDex") + "]}";
+ std::string expected_classpath_key = "PCL[" + CreateClassPathWithChecksums(dex_files1) + "]" +
+ "{PCL[" + CreateClassPathWithChecksums(dex_files2) + "]}";
+ RunTest(context.c_str(),
+ expected_classpath_key.c_str(),
+ /*expected_success=*/ true,
+ /*use_second_source=*/ false,
+ /*generate_image=*/ true);
+}
+
+TEST_F(Dex2oatClassLoaderContextTest, ContextWithSameSharedLibrariesAndImage) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Nested");
+ std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+
+ std::string context = "PCL[" + GetTestDexFileName("Nested") + "]" +
+ "{PCL[" + GetTestDexFileName("MultiDex") + "]" +
+ "#PCL[" + GetTestDexFileName("MultiDex") + "]}";
+ std::string expected_classpath_key = "PCL[" + CreateClassPathWithChecksums(dex_files1) + "]" +
+ "{PCL[" + CreateClassPathWithChecksums(dex_files2) + "]" +
+ "#PCL[" + CreateClassPathWithChecksums(dex_files2) + "]}";
+ RunTest(context.c_str(),
+ expected_classpath_key.c_str(),
+ /*expected_success=*/ true,
+ /*use_second_source=*/ false,
+ /*generate_image=*/ true);
+}
+
+TEST_F(Dex2oatClassLoaderContextTest, ContextWithSharedLibrariesDependenciesAndImage) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files1 = OpenTestDexFiles("Nested");
+ std::vector<std::unique_ptr<const DexFile>> dex_files2 = OpenTestDexFiles("MultiDex");
+
+ std::string context = "PCL[" + GetTestDexFileName("Nested") + "]" +
+ "{PCL[" + GetTestDexFileName("MultiDex") + "]" +
+ "{PCL[" + GetTestDexFileName("Nested") + "]}}";
+ std::string expected_classpath_key = "PCL[" + CreateClassPathWithChecksums(dex_files1) + "]" +
+ "{PCL[" + CreateClassPathWithChecksums(dex_files2) + "]" +
+ "{PCL[" + CreateClassPathWithChecksums(dex_files1) + "]}}";
+ RunTest(context.c_str(),
+ expected_classpath_key.c_str(),
+ /*expected_success=*/ true,
+ /*use_second_source=*/ false,
+ /*generate_image=*/ true);
+}
+
class Dex2oatDeterminism : public Dex2oatTest {};
TEST_F(Dex2oatDeterminism, UnloadCompile) {
diff --git a/dex2oat/linker/image_test.cc b/dex2oat/linker/image_test.cc
index 69dac19df9..64b98cdb37 100644
--- a/dex2oat/linker/image_test.cc
+++ b/dex2oat/linker/image_test.cc
@@ -74,13 +74,11 @@ TEST_F(ImageTest, ImageHeaderIsValid) {
oat_data_begin,
oat_data_end,
oat_file_end,
- /*boot_image_begin*/0U,
- /*boot_image_size*/0U,
- /*boot_oat_begin*/0U,
- /*boot_oat_size_*/0U,
+ /*boot_image_begin=*/ 0u,
+ /*boot_image_size=*/ 0u,
sizeof(void*),
ImageHeader::kDefaultStorageMode,
- /*data_size*/0u);
+ /*data_size=*/ 0u);
ASSERT_TRUE(image_header.IsValid());
ASSERT_TRUE(!image_header.IsAppImage());
@@ -110,7 +108,7 @@ TEST_F(ImageTest, TestDefaultMethods) {
// Test the pointer to quick code is the same in origin method
// and in the copied method form the same oat file.
ObjPtr<mirror::Class> iface_klass =
- class_linker_->LookupClass(self, "LIface;", /* class_loader */ nullptr);
+ class_linker_->LookupClass(self, "LIface;", /*class_loader=*/ nullptr);
ASSERT_NE(nullptr, iface_klass);
ArtMethod* origin = iface_klass->FindInterfaceMethod("defaultMethod", "()V", pointer_size);
ASSERT_NE(nullptr, origin);
@@ -120,7 +118,7 @@ TEST_F(ImageTest, TestDefaultMethods) {
ASSERT_NE(nullptr, code);
ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code));
ObjPtr<mirror::Class> impl_klass =
- class_linker_->LookupClass(self, "LImpl;", /* class_loader */ nullptr);
+ class_linker_->LookupClass(self, "LImpl;", /*class_loader=*/ nullptr);
ASSERT_NE(nullptr, impl_klass);
ArtMethod* copied = FindCopiedMethod(origin, impl_klass);
ASSERT_NE(nullptr, copied);
@@ -131,7 +129,7 @@ TEST_F(ImageTest, TestDefaultMethods) {
// but the copied method has pointer to interpreter
// because these methods are in different oat files.
ObjPtr<mirror::Class> iterable_klass =
- class_linker_->LookupClass(self, "Ljava/lang/Iterable;", /* class_loader */ nullptr);
+ class_linker_->LookupClass(self, "Ljava/lang/Iterable;", /*class_loader=*/ nullptr);
ASSERT_NE(nullptr, iterable_klass);
origin = iterable_klass->FindClassMethod(
"forEach", "(Ljava/util/function/Consumer;)V", pointer_size);
@@ -143,7 +141,7 @@ TEST_F(ImageTest, TestDefaultMethods) {
ASSERT_NE(nullptr, code);
ASSERT_FALSE(class_linker_->IsQuickToInterpreterBridge(code));
ObjPtr<mirror::Class> iterablebase_klass =
- class_linker_->LookupClass(self, "LIterableBase;", /* class_loader */ nullptr);
+ class_linker_->LookupClass(self, "LIterableBase;", /*class_loader=*/ nullptr);
ASSERT_NE(nullptr, iterablebase_klass);
copied = FindCopiedMethod(origin, iterablebase_klass);
ASSERT_NE(nullptr, copied);
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 182f96c614..c90eaddb4c 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -219,6 +219,7 @@ inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
storage_mode,
oat_filename_vector,
dex_file_to_oat_index_map,
+ /*class_loader=*/ nullptr,
/*dirty_image_objects=*/ nullptr));
{
{
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 248a4414dc..75b35556f1 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -146,9 +146,11 @@ static ArrayRef<const uint8_t> MaybeCompressData(ArrayRef<const uint8_t> source,
// Separate objects into multiple bins to optimize dirty memory use.
static constexpr bool kBinObjects = true;
-ObjPtr<mirror::ClassLoader> ImageWriter::GetClassLoader() {
- CHECK_EQ(class_loaders_.size(), compiler_options_.IsAppImage() ? 1u : 0u);
- return compiler_options_.IsAppImage() ? *class_loaders_.begin() : nullptr;
+ObjPtr<mirror::ClassLoader> ImageWriter::GetAppClassLoader() const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return compiler_options_.IsAppImage()
+ ? ObjPtr<mirror::ClassLoader>::DownCast(Thread::Current()->DecodeJObject(app_class_loader_))
+ : nullptr;
}
// Return true if an object is already in an image space.
@@ -675,7 +677,7 @@ bool ImageWriter::Write(int image_fd,
{
// Preload deterministic contents to the dex cache arrays we're going to write.
ScopedObjectAccess soa(self);
- ObjPtr<mirror::ClassLoader> class_loader = GetClassLoader();
+ ObjPtr<mirror::ClassLoader> class_loader = GetAppClassLoader();
std::vector<ObjPtr<mirror::DexCache>> dex_caches = FindDexCaches(self);
for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
if (IsInBootImage(dex_cache.Ptr())) {
@@ -1470,27 +1472,15 @@ class ImageWriter::PruneClassLoaderClassesVisitor : public ClassLoaderVisitor {
Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(class_loader);
class_table->Visit(classes_visitor);
removed_class_count_ += classes_visitor.Prune();
-
- // Record app image class loader. The fake boot class loader should not get registered
- // and we should end up with only one class loader for an app and none for boot image.
- if (class_loader != nullptr && class_table != nullptr) {
- DCHECK(class_loader_ == nullptr);
- class_loader_ = class_loader;
- }
}
size_t GetRemovedClassCount() const {
return removed_class_count_;
}
- ObjPtr<mirror::ClassLoader> GetClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_) {
- return class_loader_;
- }
-
private:
ImageWriter* const image_writer_;
size_t removed_class_count_;
- ObjPtr<mirror::ClassLoader> class_loader_;
};
void ImageWriter::VisitClassLoaders(ClassLoaderVisitor* visitor) {
@@ -1701,13 +1691,10 @@ void ImageWriter::PruneNonImageClasses() {
});
// Remove the undesired classes from the class roots.
- ObjPtr<mirror::ClassLoader> class_loader;
{
PruneClassLoaderClassesVisitor class_loader_visitor(this);
VisitClassLoaders(&class_loader_visitor);
VLOG(compiler) << "Pruned " << class_loader_visitor.GetRemovedClassCount() << " classes";
- class_loader = class_loader_visitor.GetClassLoader();
- DCHECK_EQ(class_loader != nullptr, compiler_options_.IsAppImage());
}
// Clear references to removed classes from the DexCaches.
@@ -1715,7 +1702,7 @@ void ImageWriter::PruneNonImageClasses() {
for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
// Pass the class loader associated with the DexCache. This can either be
// the app's `class_loader` or `nullptr` if boot class loader.
- PruneDexCache(dex_cache, IsInBootImage(dex_cache.Ptr()) ? nullptr : class_loader);
+ PruneDexCache(dex_cache, IsInBootImage(dex_cache.Ptr()) ? nullptr : GetAppClassLoader());
}
// Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
@@ -2034,18 +2021,17 @@ mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
}
} else if (obj->IsClassLoader()) {
// Register the class loader if it has a class table.
- // The fake boot class loader should not get registered and we should end up with only one
- // class loader.
+ // The fake boot class loader should not get registered.
mirror::ClassLoader* class_loader = obj->AsClassLoader();
if (class_loader->GetClassTable() != nullptr) {
DCHECK(compiler_options_.IsAppImage());
- DCHECK(class_loaders_.empty());
- class_loaders_.insert(class_loader);
- ImageInfo& image_info = GetImageInfo(oat_index);
- // Note: Avoid locking to prevent lock order violations from root visiting;
- // image_info.class_table_ table is only accessed from the image writer
- // and class_loader->GetClassTable() is iterated but not modified.
- image_info.class_table_->CopyWithoutLocks(*class_loader->GetClassTable());
+ if (class_loader == GetAppClassLoader()) {
+ ImageInfo& image_info = GetImageInfo(oat_index);
+ // Note: Avoid locking to prevent lock order violations from root visiting;
+ // image_info.class_table_ table is only accessed from the image writer
+ // and class_loader->GetClassTable() is iterated but not modified.
+ image_info.class_table_->CopyWithoutLocks(*class_loader->GetClassTable());
+ }
}
}
AssignImageBinSlot(obj, oat_index);
@@ -2323,10 +2309,8 @@ void ImageWriter::CalculateNewObjectOffsets() {
ProcessWorkStack(&work_stack);
// Store the class loader in the class roots.
- CHECK_EQ(class_loaders_.size(), 1u);
CHECK_EQ(image_roots.size(), 1u);
- CHECK(*class_loaders_.begin() != nullptr);
- image_roots[0]->Set<false>(ImageHeader::kAppImageClassLoader, *class_loaders_.begin());
+ image_roots[0]->Set<false>(ImageHeader::kAppImageClassLoader, GetAppClassLoader());
}
// Verify that all objects have assigned image bin slots.
@@ -2589,9 +2573,7 @@ void ImageWriter::CreateHeader(size_t oat_index) {
PointerToLowMemUInt32(oat_data_end),
PointerToLowMemUInt32(oat_file_end),
boot_image_begin,
- boot_image_end - boot_image_begin,
- boot_oat_begin,
- boot_oat_end - boot_oat_begin,
+ boot_oat_end - boot_image_begin,
static_cast<uint32_t>(target_ptr_size_),
image_storage_mode_,
/*data_size*/0u);
@@ -3474,6 +3456,7 @@ ImageWriter::ImageWriter(
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*>& oat_filenames,
const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
+ jobject class_loader,
const HashSet<std::string>* dirty_image_objects)
: compiler_options_(compiler_options),
global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
@@ -3482,6 +3465,7 @@ ImageWriter::ImageWriter(
image_infos_(oat_filenames.size()),
dirty_methods_(0u),
clean_methods_(0u),
+ app_class_loader_(class_loader),
boot_image_live_objects_(nullptr),
image_storage_mode_(image_storage_mode),
oat_filenames_(oat_filenames),
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 8aabaa3a9b..782bbd2fc2 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -81,6 +81,7 @@ class ImageWriter final {
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*>& oat_filenames,
const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
+ jobject class_loader,
const HashSet<std::string>* dirty_image_objects);
/*
@@ -111,7 +112,7 @@ class ImageWriter final {
return true;
}
- ObjPtr<mirror::ClassLoader> GetClassLoader();
+ ObjPtr<mirror::ClassLoader> GetAppClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_);
template <typename T>
T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -771,10 +772,8 @@ class ImageWriter final {
// Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
std::unordered_map<mirror::Class*, bool> prune_class_memo_;
- // Class loaders with a class table to write out. There should only be one class loader because
- // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
- // null is a valid entry.
- std::unordered_set<mirror::ClassLoader*> class_loaders_;
+ // The application class loader. Null for boot image.
+ jobject app_class_loader_;
// Boot image live objects, null for app image.
mirror::ObjectArray<mirror::Object>* boot_image_live_objects_;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 18528dccee..9aaabc49dd 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -1489,7 +1489,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
const std::vector<const DexFile*>* dex_files)
: OatDexMethodVisitor(writer, offset),
pointer_size_(GetInstructionSetPointerSize(writer_->compiler_options_.GetInstructionSet())),
- class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
+ class_loader_(writer->HasImage() ? writer->image_writer_->GetAppClassLoader() : nullptr),
dex_files_(dex_files),
class_linker_(Runtime::Current()->GetClassLinker()) {}
@@ -1630,7 +1630,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
offset_(relative_offset),
dex_file_(nullptr),
pointer_size_(GetInstructionSetPointerSize(writer_->compiler_options_.GetInstructionSet())),
- class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
+ class_loader_(writer->HasImage() ? writer->image_writer_->GetAppClassLoader() : nullptr),
out_(out),
file_offset_(file_offset),
class_linker_(Runtime::Current()->GetClassLinker()),
@@ -2271,6 +2271,7 @@ size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
}
if (HasImage()) {
+ ScopedAssertNoThreadSuspension sants("Init image method visitor", Thread::Current());
InitImageMethodVisitor image_visitor(this, offset, dex_files_);
success = VisitDexMethods(&image_visitor);
image_visitor.Postprocess();
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 09f0b20ca1..32122ebf93 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -34,6 +34,7 @@
#include "android-base/stringprintf.h"
#include "base/logging.h" // For VLOG_IS_ON.
+#include "base/hiddenapi_flags.h"
#include "base/mem_map.h"
#include "base/os.h"
#include "base/utils.h"
@@ -222,15 +223,10 @@ static char* CreateAccessFlagStr(uint32_t flags, AccessFor for_what) {
return str;
}
-static const char* GetHiddenapiFlagStr(uint32_t hiddenapi_flags) {
- static const char* const kValue[] = {
- "WHITELIST", /* 0x0 */
- "LIGHT_GREYLIST", /* 0x1 */
- "DARK_GREYLIST", /* 0x2 */
- "BLACKLIST", /* 0x3 */
- };
- DCHECK_LT(hiddenapi_flags, arraysize(kValue));
- return kValue[hiddenapi_flags];
+static std::string GetHiddenapiFlagStr(uint32_t hiddenapi_flags) {
+ std::string api_list(hiddenapi::ApiList::FromDexFlags(hiddenapi_flags).GetName());
+ std::transform(api_list.begin(), api_list.end(), api_list.begin(), ::toupper);
+ return api_list;
}
static std::string GetSignatureForProtoId(const dex_ir::ProtoId* proto) {
@@ -1173,7 +1169,6 @@ void DexLayout::DumpMethod(uint32_t idx,
char* type_descriptor = strdup(GetSignatureForProtoId(method_id->Proto()).c_str());
const char* back_descriptor = method_id->Class()->GetStringId()->Data();
char* access_str = CreateAccessFlagStr(flags, kAccessForMethod);
- const char* hiddenapi_str = GetHiddenapiFlagStr(hiddenapi_flags);
if (options_.output_format_ == kOutputPlain) {
fprintf(out_file_, " #%d : (in %s)\n", i, back_descriptor);
@@ -1181,7 +1176,10 @@ void DexLayout::DumpMethod(uint32_t idx,
fprintf(out_file_, " type : '%s'\n", type_descriptor);
fprintf(out_file_, " access : 0x%04x (%s)\n", flags, access_str);
if (hiddenapi_flags != 0u) {
- fprintf(out_file_, " hiddenapi : 0x%04x (%s)\n", hiddenapi_flags, hiddenapi_str);
+ fprintf(out_file_,
+ " hiddenapi : 0x%04x (%s)\n",
+ hiddenapi_flags,
+ GetHiddenapiFlagStr(hiddenapi_flags).c_str());
}
if (code == nullptr) {
fprintf(out_file_, " code : (none)\n");
@@ -1291,7 +1289,6 @@ void DexLayout::DumpSField(uint32_t idx,
const char* type_descriptor = field_id->Type()->GetStringId()->Data();
const char* back_descriptor = field_id->Class()->GetStringId()->Data();
char* access_str = CreateAccessFlagStr(flags, kAccessForField);
- const char* hiddenapi_str = GetHiddenapiFlagStr(hiddenapi_flags);
if (options_.output_format_ == kOutputPlain) {
fprintf(out_file_, " #%d : (in %s)\n", i, back_descriptor);
@@ -1299,7 +1296,10 @@ void DexLayout::DumpSField(uint32_t idx,
fprintf(out_file_, " type : '%s'\n", type_descriptor);
fprintf(out_file_, " access : 0x%04x (%s)\n", flags, access_str);
if (hiddenapi_flags != 0u) {
- fprintf(out_file_, " hiddenapi : 0x%04x (%s)\n", hiddenapi_flags, hiddenapi_str);
+ fprintf(out_file_,
+ " hiddenapi : 0x%04x (%s)\n",
+ hiddenapi_flags,
+ GetHiddenapiFlagStr(hiddenapi_flags).c_str());
}
if (init != nullptr) {
fputs(" value : ", out_file_);
diff --git a/dt_fd_forward/Android.bp b/dt_fd_forward/Android.bp
index 1ba2323a15..2a2aa188bb 100644
--- a/dt_fd_forward/Android.bp
+++ b/dt_fd_forward/Android.bp
@@ -41,6 +41,7 @@ cc_defaults {
header_libs: [
"javavm_headers",
"dt_fd_forward_export",
+ "art_libartbase_headers", // For strlcpy emulation.
],
multilib: {
lib32: {
diff --git a/dt_fd_forward/dt_fd_forward.cc b/dt_fd_forward/dt_fd_forward.cc
index a99f7850c0..d5b6de5ead 100644
--- a/dt_fd_forward/dt_fd_forward.cc
+++ b/dt_fd_forward/dt_fd_forward.cc
@@ -50,6 +50,8 @@
#include <jni.h>
#include <jdwpTransport.h>
+#include <base/strlcpy.h>
+
namespace dt_fd_forward {
// Helper that puts line-number in error message.
@@ -651,7 +653,7 @@ void FdForwardTransport::Free(void* data) {
jdwpTransportError FdForwardTransport::GetLastError(/*out*/char** err) {
std::string data = global_last_error_;
*err = reinterpret_cast<char*>(Alloc(data.size() + 1));
- strcpy(*err, data.c_str());
+ strlcpy(*err, data.c_str(), data.size() + 1);
return OK;
}
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 6a667bc7a7..58d12a1e04 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -27,6 +27,7 @@ cc_defaults {
"base/file_magic.cc",
"base/file_utils.cc",
"base/hex_dump.cc",
+ "base/hiddenapi_flags.cc",
"base/logging.cc",
"base/malloc_arena_pool.cc",
"base/membarrier.cc",
diff --git a/libartbase/base/hiddenapi_flags.cc b/libartbase/base/hiddenapi_flags.cc
new file mode 100644
index 0000000000..6caa75c570
--- /dev/null
+++ b/libartbase/base/hiddenapi_flags.cc
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hiddenapi_flags.h"
+
+namespace art {
+namespace hiddenapi {
+
+constexpr const char* ApiList::kNames[ApiList::kValueCount];
+constexpr SdkVersion ApiList::kMaxSdkVersions[ApiList::kValueCount];
+
+} // namespace hiddenapi
+} // namespace art
diff --git a/libartbase/base/hiddenapi_flags.h b/libartbase/base/hiddenapi_flags.h
new file mode 100644
index 0000000000..8e7269cc60
--- /dev/null
+++ b/libartbase/base/hiddenapi_flags.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_HIDDENAPI_FLAGS_H_
+#define ART_LIBARTBASE_BASE_HIDDENAPI_FLAGS_H_
+
+#include "sdk_version.h"
+
+#include "android-base/logging.h"
+
+namespace art {
+namespace hiddenapi {
+
+/*
+ * This class represents whether a field/method belongs to the public
+ * API (whitelist) and, if it does not, which target SDK versions of
+ * apps are still allowed to access it.
+ */
+class ApiList {
+ private:
+ using IntValueType = uint32_t;
+
+ enum class Value : IntValueType {
+ // Values independent of target SDK version of app
+ kWhitelist = 0,
+ kGreylist = 1,
+ kBlacklist = 2,
+
+ // Values dependent on target SDK version of app. Put these last as
+ // their list will be extended in future releases.
+ // The max release code implicitly includes all maintenance releases,
+ // e.g. GreylistMaxO is accessible to targetSdkVersion <= 27 (O_MR1).
+ kGreylistMaxO = 3,
+
+ // Special values
+ kInvalid = static_cast<uint32_t>(-1),
+ kMinValue = kWhitelist,
+ kMaxValue = kGreylistMaxO,
+ };
+
+ static constexpr const char* kNames[] = {
+ "whitelist",
+ "greylist",
+ "blacklist",
+ "greylist-max-o",
+ };
+
+ static constexpr SdkVersion kMaxSdkVersions[] {
+ /* whitelist */ SdkVersion::kMax,
+ /* greylist */ SdkVersion::kMax,
+ /* blacklist */ SdkVersion::kMin,
+ /* greylist-max-o */ SdkVersion::kO_MR1,
+ };
+
+ static ApiList MinValue() { return ApiList(Value::kMinValue); }
+ static ApiList MaxValue() { return ApiList(Value::kMaxValue); }
+
+ explicit ApiList(Value value) : value_(value) {}
+
+ const Value value_;
+
+ public:
+ static ApiList Whitelist() { return ApiList(Value::kWhitelist); }
+ static ApiList Greylist() { return ApiList(Value::kGreylist); }
+ static ApiList Blacklist() { return ApiList(Value::kBlacklist); }
+ static ApiList GreylistMaxO() { return ApiList(Value::kGreylistMaxO); }
+ static ApiList Invalid() { return ApiList(Value::kInvalid); }
+
+ // Decodes ApiList from dex hiddenapi flags.
+ static ApiList FromDexFlags(uint32_t dex_flags) {
+ if (MinValue().GetIntValue() <= dex_flags && dex_flags <= MaxValue().GetIntValue()) {
+ return ApiList(static_cast<Value>(dex_flags));
+ }
+ return Invalid();
+ }
+
+ // Returns the ApiList with a given name.
+ static ApiList FromName(const std::string& str) {
+ for (IntValueType i = MinValue().GetIntValue(); i <= MaxValue().GetIntValue(); i++) {
+ ApiList current = ApiList(static_cast<Value>(i));
+ if (str == current.GetName()) {
+ return current;
+ }
+ }
+ return Invalid();
+ }
+
+ bool operator==(const ApiList other) const { return value_ == other.value_; }
+ bool operator!=(const ApiList other) const { return !(*this == other); }
+
+ bool IsValid() const { return *this != Invalid(); }
+
+ IntValueType GetIntValue() const {
+ DCHECK(IsValid());
+ return static_cast<IntValueType>(value_);
+ }
+
+ const char* GetName() const { return kNames[GetIntValue()]; }
+
+ SdkVersion GetMaxAllowedSdkVersion() const { return kMaxSdkVersions[GetIntValue()]; }
+
+ static constexpr size_t kValueCount = static_cast<size_t>(Value::kMaxValue) + 1;
+};
+
+inline std::ostream& operator<<(std::ostream& os, ApiList value) {
+ os << value.GetName();
+ return os;
+}
+
+inline bool AreValidDexFlags(uint32_t dex_flags) {
+ return ApiList::FromDexFlags(dex_flags).IsValid();
+}
+
+} // namespace hiddenapi
+} // namespace art
+
+
+#endif // ART_LIBARTBASE_BASE_HIDDENAPI_FLAGS_H_
diff --git a/libdexfile/dex/class_accessor-inl.h b/libdexfile/dex/class_accessor-inl.h
index e9e3a98224..f0f14c67ea 100644
--- a/libdexfile/dex/class_accessor-inl.h
+++ b/libdexfile/dex/class_accessor-inl.h
@@ -19,6 +19,7 @@
#include "class_accessor.h"
+#include "base/hiddenapi_flags.h"
#include "base/leb128.h"
#include "class_iterator.h"
#include "code_item_accessors-inl.h"
@@ -65,7 +66,7 @@ inline void ClassAccessor::Method::Read() {
code_off_ = DecodeUnsignedLeb128(&ptr_pos_);
if (hiddenapi_ptr_pos_ != nullptr) {
hiddenapi_flags_ = DecodeUnsignedLeb128(&hiddenapi_ptr_pos_);
- DCHECK(hiddenapi::AreValidFlags(hiddenapi_flags_));
+ DCHECK(hiddenapi::AreValidDexFlags(hiddenapi_flags_));
}
}
@@ -74,7 +75,7 @@ inline void ClassAccessor::Field::Read() {
access_flags_ = DecodeUnsignedLeb128(&ptr_pos_);
if (hiddenapi_ptr_pos_ != nullptr) {
hiddenapi_flags_ = DecodeUnsignedLeb128(&hiddenapi_ptr_pos_);
- DCHECK(hiddenapi::AreValidFlags(hiddenapi_flags_));
+ DCHECK(hiddenapi::AreValidDexFlags(hiddenapi_flags_));
}
}
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index b3e7ad46c6..83f47fec19 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -29,7 +29,6 @@
#include "base/value_object.h"
#include "class_iterator.h"
#include "dex_file_types.h"
-#include "hidden_api_access_flags.h"
#include "jni.h"
#include "modifiers.h"
diff --git a/libdexfile/dex/dex_file_layout.cc b/libdexfile/dex/dex_file_layout.cc
index 1e36e05f50..75a31112bb 100644
--- a/libdexfile/dex/dex_file_layout.cc
+++ b/libdexfile/dex/dex_file_layout.cc
@@ -18,6 +18,7 @@
#include <sys/mman.h>
+#include "base/bit_utils.h"
#include "dex_file.h"
namespace art {
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index 4d33cd59f7..78e4618f04 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -1632,7 +1632,7 @@ bool DexFileVerifier::CheckIntraHiddenapiClassData() {
failure = true;
return;
}
- if (!hiddenapi::AreValidFlags(decoded_flags)) {
+ if (!hiddenapi::AreValidDexFlags(decoded_flags)) {
ErrorStringPrintf("Hiddenapi class data flags invalid (%u) for %s %i",
decoded_flags, member_type, member.GetIndex());
failure = true;
diff --git a/libdexfile/dex/hidden_api_access_flags.h b/libdexfile/dex/hidden_api_access_flags.h
deleted file mode 100644
index 77bfbc99b3..0000000000
--- a/libdexfile/dex/hidden_api_access_flags.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_LIBDEXFILE_DEX_HIDDEN_API_ACCESS_FLAGS_H_
-#define ART_LIBDEXFILE_DEX_HIDDEN_API_ACCESS_FLAGS_H_
-
-#include "base/bit_utils.h"
-#include "base/macros.h"
-#include "dex/modifiers.h"
-
-/* This class is used for encoding and decoding access flags of class members
- * from the boot class path. These access flags might contain additional two bits
- * of information on whether the given class member should be hidden from apps
- * and under what circumstances.
- *
- * Two bits are encoded for each class member in the HiddenapiClassData item,
- * stored in a stream of uleb128-encoded values for each ClassDef item.
- * The two bits correspond to values in the ApiList enum below.
- *
- * At runtime, two bits are set aside in the uint32_t access flags in the
- * intrinsics ordinal space (thus intrinsics need to be special-cased). These are
- * two consecutive bits and they are directly used to store the integer value of
- * the ApiList enum values.
- *
- */
-
-namespace art {
-namespace hiddenapi {
-
-enum class ApiList {
- kWhitelist = 0,
- kLightGreylist,
- kDarkGreylist,
- kBlacklist,
- kNoList,
-};
-
-inline bool AreValidFlags(uint32_t flags) {
- return flags <= static_cast<uint32_t>(ApiList::kBlacklist);
-}
-
-inline std::ostream& operator<<(std::ostream& os, ApiList value) {
- switch (value) {
- case ApiList::kWhitelist:
- os << "whitelist";
- break;
- case ApiList::kLightGreylist:
- os << "light greylist";
- break;
- case ApiList::kDarkGreylist:
- os << "dark greylist";
- break;
- case ApiList::kBlacklist:
- os << "blacklist";
- break;
- case ApiList::kNoList:
- os << "no list";
- break;
- }
- return os;
-}
-
-} // namespace hiddenapi
-} // namespace art
-
-
-#endif // ART_LIBDEXFILE_DEX_HIDDEN_API_ACCESS_FLAGS_H_
diff --git a/libdexfile/dex/test_dex_file_builder.h b/libdexfile/dex/test_dex_file_builder.h
index 2d8a0bbfe4..072aafb394 100644
--- a/libdexfile/dex/test_dex_file_builder.h
+++ b/libdexfile/dex/test_dex_file_builder.h
@@ -26,6 +26,7 @@
#include <android-base/logging.h>
+#include "base/bit_utils.h"
#include "dex/dex_file_loader.h"
#include "dex/standard_dex_file.h"
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index dd1ff2a49d..51f60084a1 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1783,9 +1783,7 @@ class ImageDumper {
os << "BOOT IMAGE BEGIN: " << reinterpret_cast<void*>(image_header_.GetBootImageBegin())
<< "\n";
- os << "BOOT IMAGE SIZE: " << image_header_.GetBootImageSize() << "\n";
- os << "BOOT OAT BEGIN: " << reinterpret_cast<void*>(image_header_.GetBootOatBegin()) << "\n";
- os << "BOOT OAT SIZE: " << image_header_.GetBootOatSize() << "\n\n";
+ os << "BOOT IMAGE SIZE: " << image_header_.GetBootImageSize() << "\n\n";
for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
auto section = static_cast<ImageHeader::ImageSections>(i);
diff --git a/openjdkjvmti/ti_logging.cc b/openjdkjvmti/ti_logging.cc
index 1d24d3b6b6..60f4340fc7 100644
--- a/openjdkjvmti/ti_logging.cc
+++ b/openjdkjvmti/ti_logging.cc
@@ -34,6 +34,7 @@
#include "art_jvmti.h"
#include "base/mutex.h"
+#include "base/strlcpy.h"
#include "thread-current-inl.h"
namespace openjdkjvmti {
@@ -47,13 +48,13 @@ jvmtiError LogUtil::GetLastError(jvmtiEnv* env, char** data) {
if (tienv->last_error_.empty()) {
return ERR(ABSENT_INFORMATION);
}
+ const size_t size = tienv->last_error_.size() + 1;
char* out;
- jvmtiError err = tienv->Allocate(tienv->last_error_.size() + 1,
- reinterpret_cast<unsigned char**>(&out));
+ jvmtiError err = tienv->Allocate(size, reinterpret_cast<unsigned char**>(&out));
if (err != OK) {
return err;
}
- strcpy(out, tienv->last_error_.c_str());
+ strlcpy(out, tienv->last_error_.c_str(), size);
*data = out;
return OK;
}
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 5de4a81f5e..4a3eac8a15 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -673,34 +673,24 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
return ERR(NONE);
}
-// Walks up the stack counting Java frames. This is not StackVisitor::ComputeNumFrames, as
-// runtime methods and transitions must not be counted.
-struct GetFrameCountVisitor : public art::StackVisitor {
- explicit GetFrameCountVisitor(art::Thread* thread)
- : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- count(0) {}
-
- bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
- art::ArtMethod* m = GetMethod();
- const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
- if (do_count) {
- count++;
- }
- return true;
- }
-
- size_t count;
-};
-
struct GetFrameCountClosure : public art::Closure {
public:
GetFrameCountClosure() : count(0) {}
void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
- GetFrameCountVisitor visitor(self);
- visitor.WalkStack(false);
-
- count = visitor.count;
+ // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
+ // counted.
+ art::StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = stack_visitor->GetMethod();
+ if (m != nullptr && !m->IsRuntimeMethod()) {
+ count++;
+ }
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
}
size_t count;
@@ -743,46 +733,30 @@ jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NONE);
}
-// Walks up the stack 'n' callers, when used with Thread::WalkStack.
-struct GetLocationVisitor : public art::StackVisitor {
- GetLocationVisitor(art::Thread* thread, size_t n_in)
- : art::StackVisitor(thread, nullptr, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- n(n_in),
- count(0),
- caller(nullptr),
- caller_dex_pc(0) {}
-
- bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
- art::ArtMethod* m = GetMethod();
- const bool do_count = !(m == nullptr || m->IsRuntimeMethod());
- if (do_count) {
- DCHECK(caller == nullptr);
- if (count == n) {
- caller = m;
- caller_dex_pc = GetDexPc(false);
- return false;
- }
- count++;
- }
- return true;
- }
-
- const size_t n;
- size_t count;
- art::ArtMethod* caller;
- uint32_t caller_dex_pc;
-};
-
struct GetLocationClosure : public art::Closure {
public:
explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
- GetLocationVisitor visitor(self, n);
- visitor.WalkStack(false);
-
- method = visitor.caller;
- dex_pc = visitor.caller_dex_pc;
+ // Walks up the stack 'n' callers.
+ size_t count = 0u;
+ art::StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = stack_visitor->GetMethod();
+ if (m != nullptr && !m->IsRuntimeMethod()) {
+ DCHECK(method == nullptr);
+ if (count == n) {
+ method = m;
+ dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
+ return false;
+ }
+ count++;
+ }
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
}
const size_t n;
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index c5fb7d5f40..53e4c11511 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -399,6 +399,10 @@ inline ArtField* ArtField::FindStaticFieldWithOffset(ObjPtr<mirror::Class> klass
return FindFieldWithOffset<kExactOffset>(klass->GetSFields(), field_offset);
}
+inline mirror::ClassLoader* ArtField::GetClassLoader() {
+ return GetDeclaringClass()->GetClassLoader();
+}
+
} // namespace art
#endif // ART_RUNTIME_ART_FIELD_INL_H_
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 1cf7afa022..99f2a1cd07 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -20,7 +20,6 @@
#include <jni.h>
#include "dex/dex_file_types.h"
-#include "dex/hidden_api_access_flags.h"
#include "dex/modifiers.h"
#include "dex/primitive.h"
#include "gc_root.h"
@@ -35,6 +34,7 @@ class ScopedObjectAccessAlreadyRunnable;
namespace mirror {
class Class;
+class ClassLoader;
class DexCache;
class Object;
class String;
@@ -45,6 +45,8 @@ class ArtField final {
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
+
void SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index f2541160ff..c240017900 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -31,6 +31,7 @@
#include "dex/invoke_type.h"
#include "dex/primitive.h"
#include "gc_root-inl.h"
+#include "imtable-inl.h"
#include "intrinsics_enum.h"
#include "jit/profiling_info.h"
#include "mirror/class-inl.h"
@@ -421,6 +422,31 @@ inline CodeItemDebugInfoAccessor ArtMethod::DexInstructionDebugInfo() {
return CodeItemDebugInfoAccessor(*GetDexFile(), GetCodeItem(), GetDexMethodIndex());
}
+inline void ArtMethod::SetCounter(int16_t hotness_count) {
+ DCHECK(!IsAbstract()) << PrettyMethod();
+ hotness_count_ = hotness_count;
+}
+
+inline uint16_t ArtMethod::GetCounter() {
+ DCHECK(!IsAbstract()) << PrettyMethod();
+ return hotness_count_;
+}
+
+inline uint32_t ArtMethod::GetImtIndex() {
+ if (LIKELY(IsAbstract() && imt_index_ != 0)) {
+ uint16_t imt_index = ~imt_index_;
+ DCHECK_EQ(imt_index, ImTable::GetImtIndex(this)) << PrettyMethod();
+ return imt_index;
+ } else {
+ return ImTable::GetImtIndex(this);
+ }
+}
+
+inline void ArtMethod::CalculateAndSetImtIndex() {
+ DCHECK(IsAbstract()) << PrettyMethod();
+ imt_index_ = ~ImTable::GetImtIndex(this);
+}
+
} // namespace art
#endif // ART_RUNTIME_ART_METHOD_INL_H_
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 5bbee92c14..cc214f7ca3 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -650,24 +650,13 @@ class ArtMethod final {
void CopyFrom(ArtMethod* src, PointerSize image_pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Note, hotness_counter_ updates are non-atomic but it doesn't need to be precise. Also,
- // given that the counter is only 16 bits wide we can expect wrap-around in some
- // situations. Consumers of hotness_count_ must be able to deal with that.
- uint16_t IncrementCounter() {
- return ++hotness_count_;
- }
+ ALWAYS_INLINE void SetCounter(int16_t hotness_count) REQUIRES_SHARED(Locks::mutator_lock_);
- void ClearCounter() {
- hotness_count_ = 0;
- }
+ ALWAYS_INLINE uint16_t GetCounter() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetCounter(int16_t hotness_count) {
- hotness_count_ = hotness_count;
- }
+ ALWAYS_INLINE uint32_t GetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetCounter() const {
- return hotness_count_;
- }
+ void CalculateAndSetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
static constexpr MemberOffset HotnessCountOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, hotness_count_));
@@ -772,9 +761,14 @@ class ArtMethod final {
// ifTable.
uint16_t method_index_;
- // The hotness we measure for this method. Not atomic, as we allow
- // missing increments: if the method is hot, we will see it eventually.
- uint16_t hotness_count_;
+ union {
+ // Non-abstract methods: The hotness we measure for this method. Not atomic,
+ // as we allow missing increments: if the method is hot, we will see it eventually.
+ uint16_t hotness_count_;
+ // Abstract methods: IMT index (bitwise negated) or zero if it was not cached.
+ // The negation is needed to distinguish zero index and missing cached entry.
+ uint16_t imt_index_;
+ };
// Fake padding field gets inserted here.
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 0c8fe58252..e391a1549b 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -73,13 +73,17 @@ enum LockLevel : uint8_t {
// level lock, it is permitted to acquire a second one - with internal safeguards to ensure that
// the second lock acquisition does not result in deadlock. This is implemented in the lock
// order by treating the second acquisition of a kThreadWaitLock as a kThreadWaitWakeLock
- // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock.
+ // acquisition. Thus, acquiring kThreadWaitWakeLock requires holding kThreadWaitLock. This entry
+ // is here near the bottom of the hierarchy because other locks should not be
+ // acquired while it is held. kThreadWaitLock cannot be moved here because GC
+ // activity acquires locks while holding the wait lock.
kThreadWaitWakeLock,
- kThreadWaitLock,
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
kMarkSweepMarkStackLock,
+ // Can be held while GC-related work is done, and thus must be above kMarkSweepMarkStackLock.
+ kThreadWaitLock,
kCHALock,
kJitCodeCacheLock,
kRosAllocGlobalLock,
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 7e011371b7..6703205502 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -135,7 +135,7 @@ inline ObjPtr<mirror::Class> ClassLinker::ResolveType(dex::TypeIndex type_idx,
ObjPtr<mirror::Class> resolved_type =
referrer->GetDexCache<kWithoutReadBarrier>()->GetResolvedType(type_idx);
if (UNLIKELY(resolved_type == nullptr)) {
- resolved_type = DoResolveType(type_idx, referrer->GetDeclaringClass());
+ resolved_type = DoResolveType(type_idx, referrer);
}
return resolved_type;
}
@@ -149,7 +149,7 @@ inline ObjPtr<mirror::Class> ClassLinker::ResolveType(dex::TypeIndex type_idx,
ObjPtr<mirror::Class> resolved_type =
referrer->GetDexCache<kWithoutReadBarrier>()->GetResolvedType(type_idx);
if (UNLIKELY(resolved_type == nullptr)) {
- resolved_type = DoResolveType(type_idx, referrer->GetDeclaringClass());
+ resolved_type = DoResolveType(type_idx, referrer);
}
return resolved_type;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 639fa7ec92..991faa27d3 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1094,51 +1094,165 @@ static bool GetDexPathListElementName(ObjPtr<mirror::Object> element,
return false;
}
-static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
- std::list<ObjPtr<mirror::String>>* out_dex_file_names,
- std::string* error_msg)
+static bool GetDexFileNames(ScopedObjectAccessUnchecked& soa,
+ ObjPtr<mirror::ClassLoader> class_loader,
+ /*out*/std::list<ObjPtr<mirror::String>>* dex_files,
+ /*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(out_dex_file_names != nullptr);
- DCHECK(error_msg != nullptr);
- ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> handle(hs.NewHandle(class_loader));
- while (!ClassLinker::IsBootClassLoader(soa, class_loader)) {
- if (soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader) !=
- class_loader->GetClass()) {
- *error_msg = StringPrintf("Unknown class loader type %s",
- class_loader->PrettyTypeOf().c_str());
- // Unsupported class loader.
+ // Get element names. Sets error to true on failure.
+ auto add_element_names = [&](ObjPtr<mirror::Object> element, bool* error)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (element == nullptr) {
+ *error_msg = "Null dex element";
+ *error = true; // Null element is a critical error.
+ return false; // Had an error, stop the visit.
+ }
+ ObjPtr<mirror::String> name;
+ if (!GetDexPathListElementName(element, &name)) {
+ *error_msg = "Invalid dex path list element";
+ *error = true; // Invalid element, make it a critical error.
+ return false; // Stop the visit.
+ }
+ if (name != nullptr) {
+ dex_files->push_front(name);
+ }
+ return true; // Continue with the next Element.
+ };
+ bool error = VisitClassLoaderDexElements(soa,
+ handle,
+ add_element_names,
+ /*defaultReturn=*/ false);
+ return !error;
+}
+
+static bool CompareClassLoaderTypes(ScopedObjectAccessUnchecked& soa,
+ ObjPtr<mirror::ClassLoader> image_class_loader,
+ ObjPtr<mirror::ClassLoader> class_loader,
+ std::string* error_msg)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
+ if (!ClassLinker::IsBootClassLoader(soa, image_class_loader)) {
+ *error_msg = "Hierarchies don't match";
return false;
}
- // Get element names. Sets error to true on failure.
- auto add_element_names = [&](ObjPtr<mirror::Object> element, bool* error)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (element == nullptr) {
- *error_msg = "Null dex element";
- *error = true; // Null element is a critical error.
- return false; // Had an error, stop the visit.
- }
- ObjPtr<mirror::String> name;
- if (!GetDexPathListElementName(element, &name)) {
- *error_msg = "Invalid dex path list element";
- *error = false; // Invalid element is not a critical error.
- return false; // Stop the visit.
- }
- if (name != nullptr) {
- out_dex_file_names->push_front(name);
- }
- return true; // Continue with the next Element.
- };
- bool error = VisitClassLoaderDexElements(soa,
- handle,
- add_element_names,
- /* defaultReturn= */ false);
- if (error) {
- // An error occurred during DexPathList Element visiting.
+ } else if (ClassLinker::IsBootClassLoader(soa, image_class_loader)) {
+ *error_msg = "Hierarchies don't match";
+ return false;
+ } else if (class_loader->GetClass() != image_class_loader->GetClass()) {
+ *error_msg = StringPrintf("Class loader types don't match %s and %s",
+ image_class_loader->PrettyTypeOf().c_str(),
+ class_loader->PrettyTypeOf().c_str());
+ return false;
+ } else if (soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader) !=
+ class_loader->GetClass()) {
+ *error_msg = StringPrintf("Unknown class loader type %s",
+ class_loader->PrettyTypeOf().c_str());
+ // Unsupported class loader.
+ return false;
+ }
+ return true;
+}
+
+static bool CompareDexFiles(const std::list<ObjPtr<mirror::String>>& image_dex_files,
+ const std::list<ObjPtr<mirror::String>>& loader_dex_files,
+ std::string* error_msg)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ bool equal = (image_dex_files.size() == loader_dex_files.size()) &&
+ std::equal(image_dex_files.begin(),
+ image_dex_files.end(),
+ loader_dex_files.begin(),
+ [](ObjPtr<mirror::String> lhs, ObjPtr<mirror::String> rhs)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return lhs->Equals(rhs);
+ });
+ if (!equal) {
+ VLOG(image) << "Image dex files " << image_dex_files.size();
+ for (ObjPtr<mirror::String> name : image_dex_files) {
+ VLOG(image) << name->ToModifiedUtf8();
+ }
+ VLOG(image) << "Loader dex files " << loader_dex_files.size();
+ for (ObjPtr<mirror::String> name : loader_dex_files) {
+ VLOG(image) << name->ToModifiedUtf8();
+ }
+ *error_msg = "Mismatch in dex files";
+ }
+ return equal;
+}
+
+static bool CompareClassLoaders(ScopedObjectAccessUnchecked& soa,
+ ObjPtr<mirror::ClassLoader> image_class_loader,
+ ObjPtr<mirror::ClassLoader> class_loader,
+ bool check_dex_file_names,
+ std::string* error_msg)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!CompareClassLoaderTypes(soa, image_class_loader, class_loader, error_msg)) {
+ return false;
+ }
+
+ if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
+ // No need to check further.
+ return true;
+ }
+
+ if (check_dex_file_names) {
+ std::list<ObjPtr<mirror::String>> image_dex_files;
+ if (!GetDexFileNames(soa, image_class_loader, &image_dex_files, error_msg)) {
+ return false;
+ }
+
+ std::list<ObjPtr<mirror::String>> loader_dex_files;
+ if (!GetDexFileNames(soa, class_loader, &loader_dex_files, error_msg)) {
+ return false;
+ }
+
+ if (!CompareDexFiles(image_dex_files, loader_dex_files, error_msg)) {
return false;
}
- class_loader = class_loader->GetParent();
+ }
+
+ ArtField* field =
+ jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
+ ObjPtr<mirror::Object> shared_libraries_image_loader = field->GetObject(image_class_loader.Ptr());
+ ObjPtr<mirror::Object> shared_libraries_loader = field->GetObject(class_loader.Ptr());
+ if (shared_libraries_image_loader == nullptr) {
+ if (shared_libraries_loader != nullptr) {
+ *error_msg = "Mismatch in shared libraries";
+ return false;
+ }
+ } else if (shared_libraries_loader == nullptr) {
+ *error_msg = "Mismatch in shared libraries";
+ return false;
+ } else {
+ ObjPtr<mirror::ObjectArray<mirror::ClassLoader>> array1 =
+ shared_libraries_image_loader->AsObjectArray<mirror::ClassLoader>();
+ ObjPtr<mirror::ObjectArray<mirror::ClassLoader>> array2 =
+ shared_libraries_loader->AsObjectArray<mirror::ClassLoader>();
+ if (array1->GetLength() != array2->GetLength()) {
+ *error_msg = "Mismatch in number of shared libraries";
+ return false;
+ }
+
+ for (int32_t i = 0; i < array1->GetLength(); ++i) {
+ // Do a full comparison of the class loaders, including comparing their dex files.
+ if (!CompareClassLoaders(soa,
+ array1->Get(i),
+ array2->Get(i),
+ /*check_dex_file_names=*/ true,
+ error_msg)) {
+ return false;
+ }
+ }
+ }
+
+ // Do a full comparison of the class loaders, including comparing their dex files.
+ if (!CompareClassLoaders(soa,
+ image_class_loader->GetParent(),
+ class_loader->GetParent(),
+ /*check_dex_file_names=*/ true,
+ error_msg)) {
+ return false;
}
return true;
}
@@ -1907,6 +2021,7 @@ bool ClassLinker::AddImageSpace(
if (app_image) {
ScopedObjectAccessUnchecked soa(Thread::Current());
+ ScopedAssertNoThreadSuspension sants("Checking app image", soa.Self());
// Check that the class loader resolves the same way as the ones in the image.
// Image class loader [A][B][C][image dex files]
// Class loader = [???][dex_elements][image dex files]
@@ -1919,21 +2034,12 @@ bool ClassLinker::AddImageSpace(
*error_msg = "Unexpected BootClassLoader in app image";
return false;
}
- std::list<ObjPtr<mirror::String>> image_dex_file_names;
- std::string temp_error_msg;
- if (!FlattenPathClassLoader(image_class_loader.Get(), &image_dex_file_names, &temp_error_msg)) {
- *error_msg = StringPrintf("Failed to flatten image class loader hierarchy '%s'",
- temp_error_msg.c_str());
- return false;
- }
- std::list<ObjPtr<mirror::String>> loader_dex_file_names;
- if (!FlattenPathClassLoader(class_loader.Get(), &loader_dex_file_names, &temp_error_msg)) {
- *error_msg = StringPrintf("Failed to flatten class loader hierarchy '%s'",
- temp_error_msg.c_str());
- return false;
- }
- // Add the temporary dex path list elements at the end.
+ // The dex files of `class_loader` are not set up yet, so we cannot do a full comparison
+ // of `class_loader` and `image_class_loader` in `CompareClassLoaders`. Therefore, we
+ // special case the comparison of dex files of the two class loaders, but then do full
+ // comparisons for their shared libraries and parent.
auto elements = soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements);
+ std::list<ObjPtr<mirror::String>> loader_dex_file_names;
for (size_t i = 0, num_elems = elements->GetLength(); i < num_elems; ++i) {
ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i);
if (element != nullptr) {
@@ -1944,31 +2050,29 @@ bool ClassLinker::AddImageSpace(
}
}
}
- // Ignore the number of image dex files since we are adding those to the class loader anyways.
- CHECK_GE(static_cast<size_t>(image_dex_file_names.size()),
- static_cast<size_t>(dex_caches->GetLength()));
- size_t image_count = image_dex_file_names.size() - dex_caches->GetLength();
- // Check that the dex file names match.
- bool equal = image_count == loader_dex_file_names.size();
- if (equal) {
- auto it1 = image_dex_file_names.begin();
- auto it2 = loader_dex_file_names.begin();
- for (size_t i = 0; equal && i < image_count; ++i, ++it1, ++it2) {
- equal = equal && (*it1)->Equals(*it2);
- }
- }
- if (!equal) {
- VLOG(image) << "Image dex files " << image_dex_file_names.size();
- for (ObjPtr<mirror::String> name : image_dex_file_names) {
- VLOG(image) << name->ToModifiedUtf8();
- }
- VLOG(image) << "Loader dex files " << loader_dex_file_names.size();
- for (ObjPtr<mirror::String> name : loader_dex_file_names) {
- VLOG(image) << name->ToModifiedUtf8();
- }
- *error_msg = "Rejecting application image due to class loader mismatch";
- // Ignore class loader mismatch for now since these would just use possibly incorrect
- // oat code anyways. The structural class check should be done in the parent.
+ std::string temp_error_msg;
+ std::list<ObjPtr<mirror::String>> image_dex_file_names;
+ bool success = GetDexFileNames(
+ soa, image_class_loader.Get(), &image_dex_file_names, &temp_error_msg);
+ if (success) {
+ // Ignore the number of image dex files since we are adding those to the class loader anyways.
+ CHECK_GE(static_cast<size_t>(image_dex_file_names.size()),
+ static_cast<size_t>(dex_caches->GetLength()));
+ size_t image_count = image_dex_file_names.size() - dex_caches->GetLength();
+ image_dex_file_names.resize(image_count);
+ success = success && CompareDexFiles(image_dex_file_names,
+ loader_dex_file_names,
+ &temp_error_msg);
+ success = success && CompareClassLoaders(soa,
+ image_class_loader.Get(),
+ class_loader.Get(),
+ /*check_dex_file_names=*/ false,
+ &temp_error_msg);
+ }
+ if (!success) {
+ *error_msg = StringPrintf("Rejecting application image due to class loader mismatch: '%s'",
+ temp_error_msg.c_str());
+ return false;
}
}
@@ -3147,7 +3251,7 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void*
return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
}
- if (runtime->IsNativeDebuggableZygoteOK()) {
+ if (runtime->IsNativeDebuggable()) {
DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
// since we want to JIT it (at first use) with extra stackmaps for native
@@ -3530,6 +3634,10 @@ void ClassLinker::LoadMethod(const DexFile& dex_file,
dex_file, dst->GetClassDef(), dex_method_idx);
}
dst->SetAccessFlags(access_flags);
+ // Must be done after SetAccessFlags since IsAbstract depends on it.
+ if (klass->IsInterface() && dst->IsAbstract()) {
+ dst->CalculateAndSetImtIndex();
+ }
}
void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
@@ -6619,7 +6727,7 @@ void ClassLinker::FillIMTFromIfTable(ObjPtr<mirror::IfTable> if_table,
// or interface methods in the IMT here they will not create extra conflicts since we compare
// names and signatures in SetIMTRef.
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+ const uint32_t imt_index = interface_method->GetImtIndex();
// There is only any conflicts if all of the interface methods for an IMT slot don't have
// the same implementation method, keep track of this to avoid creating a conflict table in
@@ -6673,7 +6781,7 @@ void ClassLinker::FillIMTFromIfTable(ObjPtr<mirror::IfTable> if_table,
}
DCHECK(implementation_method != nullptr);
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
- const uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+ const uint32_t imt_index = interface_method->GetImtIndex();
if (!imt[imt_index]->IsRuntimeMethod() ||
imt[imt_index] == unimplemented_method ||
imt[imt_index] == imt_conflict_method) {
@@ -7599,7 +7707,7 @@ bool ClassLinker::LinkInterfaceMethods(
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+ uint32_t imt_index = interface_method->GetImtIndex();
ArtMethod** imt_ptr = &out_imt[imt_index];
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -8076,14 +8184,22 @@ ObjPtr<mirror::Class> ClassLinker::DoLookupResolvedType(dex::TypeIndex type_idx,
return type;
}
-ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
- ObjPtr<mirror::Class> referrer) {
+template <typename T>
+ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx, T referrer) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
return DoResolveType(type_idx, dex_cache, class_loader);
}
+// Instantiate the above.
+template ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
+ ArtField* referrer);
+template ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
+ ArtMethod* referrer);
+template ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
+ ObjPtr<mirror::Class> referrer);
+
ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 47931fec75..15a7204dd2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -931,8 +931,8 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_);
// Implementation of ResolveType() called when the type was not found in the dex cache.
- ObjPtr<mirror::Class> DoResolveType(dex::TypeIndex type_idx,
- ObjPtr<mirror::Class> referrer)
+ template <typename T>
+ ObjPtr<mirror::Class> DoResolveType(dex::TypeIndex type_idx, T referrer)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
ObjPtr<mirror::Class> DoResolveType(dex::TypeIndex type_idx,
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 3ad7fc92a2..4af97f0f35 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2362,25 +2362,18 @@ void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>*
}
static int GetStackDepth(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
- struct CountStackDepthVisitor : public StackVisitor {
- explicit CountStackDepthVisitor(Thread* thread_in)
- : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- depth(0) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- if (!GetMethod()->IsRuntimeMethod()) {
- ++depth;
- }
- return true;
- }
- size_t depth;
- };
-
- CountStackDepthVisitor visitor(thread);
- visitor.WalkStack();
- return visitor.depth;
+ size_t depth = 0u;
+ StackVisitor::WalkStack(
+ [&depth](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!visitor->GetMethod()->IsRuntimeMethod()) {
+ ++depth;
+ }
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return depth;
}
JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
@@ -2398,47 +2391,10 @@ JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* resul
return JDWP::ERR_NONE;
}
-JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
- size_t frame_count, JDWP::ExpandBuf* buf) {
- class GetFrameVisitor : public StackVisitor {
- public:
- GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
- JDWP::ExpandBuf* buf_in)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- depth_(0),
- start_frame_(start_frame_in),
- frame_count_(frame_count_in),
- buf_(buf_in) {
- expandBufAdd4BE(buf_, frame_count_);
- }
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- if (GetMethod()->IsRuntimeMethod()) {
- return true; // The debugger can't do anything useful with a frame that has no Method*.
- }
- if (depth_ >= start_frame_ + frame_count_) {
- return false;
- }
- if (depth_ >= start_frame_) {
- JDWP::FrameId frame_id(GetFrameId());
- JDWP::JdwpLocation location;
- SetJdwpLocation(&location, GetMethod(), GetDexPc());
- VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
- expandBufAdd8BE(buf_, frame_id);
- expandBufAddLocation(buf_, location);
- }
- ++depth_;
- return true;
- }
-
- private:
- size_t depth_;
- const size_t start_frame_;
- const size_t frame_count_;
- JDWP::ExpandBuf* buf_;
- };
-
+JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id,
+ const size_t start_frame,
+ const size_t frame_count,
+ JDWP::ExpandBuf* buf) {
ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
Thread* thread = DecodeThread(soa, thread_id, &error);
@@ -2448,8 +2404,34 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram
if (!IsSuspendedForDebugger(soa, thread)) {
return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
- GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
- visitor.WalkStack();
+
+ expandBufAdd4BE(buf, frame_count);
+
+ size_t depth = 0u;
+ StackVisitor::WalkStack(
+ [&](StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (visitor->GetMethod()->IsRuntimeMethod()) {
+ return true; // The debugger can't do anything useful with a frame that has no Method*.
+ }
+ if (depth >= start_frame + frame_count) {
+ return false;
+ }
+ if (depth >= start_frame) {
+ JDWP::FrameId frame_id(visitor->GetFrameId());
+ JDWP::JdwpLocation location;
+ SetJdwpLocation(&location, visitor->GetMethod(), visitor->GetDexPc());
+ VLOG(jdwp)
+ << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth, frame_id) << location;
+ expandBufAdd8BE(buf, frame_id);
+ expandBufAddLocation(buf, location);
+ }
+ ++depth;
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+
return JDWP::ERR_NONE;
}
@@ -2530,28 +2512,6 @@ void Dbg::SuspendSelf() {
Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
}
-struct GetThisVisitor : public StackVisitor {
- GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- this_object(nullptr),
- frame_id(frame_id_in) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- if (frame_id != GetFrameId()) {
- return true; // continue
- } else {
- this_object = GetThisObject();
- return false;
- }
- }
-
- mirror::Object* this_object;
- JDWP::FrameId frame_id;
-};
-
JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
JDWP::ObjectId* result) {
ScopedObjectAccessUnchecked soa(Thread::Current());
@@ -2564,48 +2524,50 @@ JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame
return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
std::unique_ptr<Context> context(Context::Create());
- GetThisVisitor visitor(thread, context.get(), frame_id);
- visitor.WalkStack();
- *result = gRegistry->Add(visitor.this_object);
+ mirror::Object* this_object = nullptr;
+ StackVisitor::WalkStack(
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (frame_id != stack_visitor->GetFrameId()) {
+ return true; // continue
+ } else {
+ this_object = stack_visitor->GetThisObject();
+ return false;
+ }
+ },
+ thread,
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ *result = gRegistry->Add(this_object);
return JDWP::ERR_NONE;
}
-// Walks the stack until we find the frame with the given FrameId.
-class FindFrameVisitor final : public StackVisitor {
- public:
- FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- frame_id_(frame_id),
- error_(JDWP::ERR_INVALID_FRAMEID) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- if (GetFrameId() != frame_id_) {
- return true; // Not our frame, carry on.
- }
- ArtMethod* m = GetMethod();
- if (m->IsNative()) {
- // We can't read/write local value from/into native method.
- error_ = JDWP::ERR_OPAQUE_FRAME;
- } else {
- // We found our frame.
- error_ = JDWP::ERR_NONE;
- }
- return false;
- }
-
- JDWP::JdwpError GetError() const {
- return error_;
- }
-
- private:
- const JDWP::FrameId frame_id_;
- JDWP::JdwpError error_;
-
- DISALLOW_COPY_AND_ASSIGN(FindFrameVisitor);
-};
+template <typename FrameHandler>
+static JDWP::JdwpError FindAndHandleNonNativeFrame(Thread* thread,
+ JDWP::FrameId frame_id,
+ const FrameHandler& handler)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ JDWP::JdwpError result = JDWP::ERR_INVALID_FRAMEID;
+ std::unique_ptr<Context> context(Context::Create());
+ StackVisitor::WalkStack(
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (stack_visitor->GetFrameId() != frame_id) {
+ return true; // Not our frame, carry on.
+ }
+ ArtMethod* m = stack_visitor->GetMethod();
+ if (m->IsNative()) {
+ // We can't read/write local value from/into native method.
+ result = JDWP::ERR_OPAQUE_FRAME;
+ } else {
+ // We found our frame.
+ result = handler(stack_visitor);
+ }
+ return false;
+ },
+ thread,
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return result;
+}
JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
JDWP::ObjectId thread_id = request->ReadThreadId();
@@ -2620,31 +2582,29 @@ JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pRe
if (!IsSuspendedForDebugger(soa, thread)) {
return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
- // Find the frame with the given frame_id.
- std::unique_ptr<Context> context(Context::Create());
- FindFrameVisitor visitor(thread, context.get(), frame_id);
- visitor.WalkStack();
- if (visitor.GetError() != JDWP::ERR_NONE) {
- return visitor.GetError();
- }
- // Read the values from visitor's context.
- int32_t slot_count = request->ReadSigned32("slot count");
- expandBufAdd4BE(pReply, slot_count); /* "int values" */
- for (int32_t i = 0; i < slot_count; ++i) {
- uint32_t slot = request->ReadUnsigned32("slot");
- JDWP::JdwpTag reqSigByte = request->ReadTag();
+ return FindAndHandleNonNativeFrame(
+ thread,
+ frame_id,
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Read the values from visitor's context.
+ int32_t slot_count = request->ReadSigned32("slot count");
+ expandBufAdd4BE(pReply, slot_count); /* "int values" */
+ for (int32_t i = 0; i < slot_count; ++i) {
+ uint32_t slot = request->ReadUnsigned32("slot");
+ JDWP::JdwpTag reqSigByte = request->ReadTag();
- VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte;
+ VLOG(jdwp) << " --> slot " << slot << " " << reqSigByte;
- size_t width = Dbg::GetTagWidth(reqSigByte);
- uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
- error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
- if (error != JDWP::ERR_NONE) {
- return error;
- }
- }
- return JDWP::ERR_NONE;
+ size_t width = Dbg::GetTagWidth(reqSigByte);
+ uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
+ error = Dbg::GetLocalValue(*stack_visitor, soa, slot, reqSigByte, ptr, width);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ }
+ return JDWP::ERR_NONE;
+ });
}
constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
@@ -2791,29 +2751,27 @@ JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
if (!IsSuspendedForDebugger(soa, thread)) {
return JDWP::ERR_THREAD_NOT_SUSPENDED;
}
- // Find the frame with the given frame_id.
- std::unique_ptr<Context> context(Context::Create());
- FindFrameVisitor visitor(thread, context.get(), frame_id);
- visitor.WalkStack();
- if (visitor.GetError() != JDWP::ERR_NONE) {
- return visitor.GetError();
- }
-
- // Writes the values into visitor's context.
- int32_t slot_count = request->ReadSigned32("slot count");
- for (int32_t i = 0; i < slot_count; ++i) {
- uint32_t slot = request->ReadUnsigned32("slot");
- JDWP::JdwpTag sigByte = request->ReadTag();
- size_t width = Dbg::GetTagWidth(sigByte);
- uint64_t value = request->ReadValue(width);
- VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value;
- error = Dbg::SetLocalValue(thread, visitor, slot, sigByte, value, width);
- if (error != JDWP::ERR_NONE) {
- return error;
- }
- }
- return JDWP::ERR_NONE;
+ return FindAndHandleNonNativeFrame(
+ thread,
+ frame_id,
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Writes the values into visitor's context.
+ int32_t slot_count = request->ReadSigned32("slot count");
+ for (int32_t i = 0; i < slot_count; ++i) {
+ uint32_t slot = request->ReadUnsigned32("slot");
+ JDWP::JdwpTag sigByte = request->ReadTag();
+ size_t width = Dbg::GetTagWidth(sigByte);
+ uint64_t value = request->ReadValue(width);
+
+ VLOG(jdwp) << " --> slot " << slot << " " << sigByte << " " << value;
+ error = Dbg::SetLocalValue(thread, *stack_visitor, slot, sigByte, value, width);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ }
+ return JDWP::ERR_NONE;
+ });
}
template<typename T>
@@ -2985,107 +2943,71 @@ void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
}
-/**
- * Finds the location where this exception will be caught. We search until we reach the top
- * frame, in which case this exception is considered uncaught.
- */
-class CatchLocationFinder : public StackVisitor {
- public:
- CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- exception_(exception),
- handle_scope_(self),
- this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
- catch_method_(nullptr),
- throw_method_(nullptr),
- catch_dex_pc_(dex::kDexNoIndex),
- throw_dex_pc_(dex::kDexNoIndex) {
- }
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* method = GetMethod();
- DCHECK(method != nullptr);
- if (method->IsRuntimeMethod()) {
- // Ignore callee save method.
- DCHECK(method->IsCalleeSaveMethod());
- return true;
- }
-
- uint32_t dex_pc = GetDexPc();
- if (throw_method_ == nullptr) {
- // First Java method found. It is either the method that threw the exception,
- // or the Java native method that is reporting an exception thrown by
- // native code.
- this_at_throw_.Assign(GetThisObject());
- throw_method_ = method;
- throw_dex_pc_ = dex_pc;
- }
-
- if (dex_pc != dex::kDexNoIndex) {
- StackHandleScope<1> hs(GetThread());
- uint32_t found_dex_pc;
- Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
- bool unused_clear_exception;
- found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
- if (found_dex_pc != dex::kDexNoIndex) {
- catch_method_ = method;
- catch_dex_pc_ = found_dex_pc;
- return false; // End stack walk.
- }
- }
- return true; // Continue stack walk.
- }
-
- ArtMethod* GetCatchMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
- return catch_method_;
- }
-
- ArtMethod* GetThrowMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
- return throw_method_;
- }
-
- mirror::Object* GetThisAtThrow() REQUIRES_SHARED(Locks::mutator_lock_) {
- return this_at_throw_.Get();
- }
-
- uint32_t GetCatchDexPc() const {
- return catch_dex_pc_;
- }
-
- uint32_t GetThrowDexPc() const {
- return throw_dex_pc_;
- }
-
- private:
- const Handle<mirror::Throwable>& exception_;
- StackHandleScope<1> handle_scope_;
- MutableHandle<mirror::Object> this_at_throw_;
- ArtMethod* catch_method_;
- ArtMethod* throw_method_;
- uint32_t catch_dex_pc_;
- uint32_t throw_dex_pc_;
-
- DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
-};
-
void Dbg::PostException(mirror::Throwable* exception_object) {
if (!IsDebuggerActive()) {
return;
}
Thread* const self = Thread::Current();
- StackHandleScope<1> handle_scope(self);
+ StackHandleScope<2> handle_scope(self);
Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
+ MutableHandle<mirror::Object> this_at_throw = handle_scope.NewHandle<mirror::Object>(nullptr);
std::unique_ptr<Context> context(Context::Create());
- CatchLocationFinder clf(self, h_exception, context.get());
- clf.WalkStack(/* include_transitions= */ false);
+
+ ArtMethod* catch_method = nullptr;
+ ArtMethod* throw_method = nullptr;
+ uint32_t catch_dex_pc = dex::kDexNoIndex;
+ uint32_t throw_dex_pc = dex::kDexNoIndex;
+ StackVisitor::WalkStack(
+ /**
+ * Finds the location where this exception will be caught. We search until we reach the top
+ * frame, in which case this exception is considered uncaught.
+ */
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* method = stack_visitor->GetMethod();
+ DCHECK(method != nullptr);
+ if (method->IsRuntimeMethod()) {
+ // Ignore callee save method.
+ DCHECK(method->IsCalleeSaveMethod());
+ return true;
+ }
+
+ uint32_t dex_pc = stack_visitor->GetDexPc();
+ if (throw_method == nullptr) {
+ // First Java method found. It is either the method that threw the exception,
+ // or the Java native method that is reporting an exception thrown by
+ // native code.
+ this_at_throw.Assign(stack_visitor->GetThisObject());
+ throw_method = method;
+ throw_dex_pc = dex_pc;
+ }
+
+ if (dex_pc != dex::kDexNoIndex) {
+ StackHandleScope<1> hs(stack_visitor->GetThread());
+ uint32_t found_dex_pc;
+ Handle<mirror::Class> exception_class(hs.NewHandle(h_exception->GetClass()));
+ bool unused_clear_exception;
+ found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
+ if (found_dex_pc != dex::kDexNoIndex) {
+ catch_method = method;
+ catch_dex_pc = found_dex_pc;
+ return false; // End stack walk.
+ }
+ }
+ return true; // Continue stack walk.
+ },
+ self,
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+
JDWP::EventLocation exception_throw_location;
- SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
+ SetEventLocation(&exception_throw_location, throw_method, throw_dex_pc);
JDWP::EventLocation exception_catch_location;
- SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());
+ SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
- gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
- clf.GetThisAtThrow());
+ gJdwpState->PostException(&exception_throw_location,
+ h_exception.Get(),
+ &exception_catch_location,
+ this_at_throw.Get());
}
void Dbg::PostClassPrepare(mirror::Class* c) {
@@ -3649,56 +3571,6 @@ bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
return instrumentation->IsDeoptimized(m);
}
-class NeedsDeoptimizationVisitor : public StackVisitor {
- public:
- explicit NeedsDeoptimizationVisitor(Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- needs_deoptimization_(false) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- // The visitor is meant to be used when handling exception from compiled code only.
- CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: "
- << ArtMethod::PrettyMethod(GetMethod());
- ArtMethod* method = GetMethod();
- if (method == nullptr) {
- // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment)
- // so we can stop the visit.
- DCHECK(!needs_deoptimization_);
- return false;
- }
- if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
- // We found a compiled frame in the stack but instrumentation is set to interpret
- // everything: we need to deoptimize.
- needs_deoptimization_ = true;
- return false;
- }
- if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
- // We found a deoptimized method in the stack.
- needs_deoptimization_ = true;
- return false;
- }
- ShadowFrame* frame = GetThread()->FindDebuggerShadowFrame(GetFrameId());
- if (frame != nullptr) {
- // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
- // deoptimize the stack to execute (and deallocate) this frame.
- needs_deoptimization_ = true;
- return false;
- }
- return true;
- }
-
- bool NeedsDeoptimization() const {
- return needs_deoptimization_;
- }
-
- private:
- // Do we need to deoptimize the stack?
- bool needs_deoptimization_;
-
- DISALLOW_COPY_AND_ASSIGN(NeedsDeoptimizationVisitor);
-};
-
// Do we need to deoptimize the stack to handle an exception?
bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
const SingleStepControl* const ssc = thread->GetSingleStepControl();
@@ -3708,9 +3580,45 @@ bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
}
// Deoptimization is required if at least one method in the stack needs it. However we
// skip frames that will be unwound (thus not executed).
- NeedsDeoptimizationVisitor visitor(thread);
- visitor.WalkStack(true); // includes upcall.
- return visitor.NeedsDeoptimization();
+ bool needs_deoptimization = false;
+ StackVisitor::WalkStack(
+ [&](art::StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // The visitor is meant to be used when handling exception from compiled code only.
+ CHECK(!visitor->IsShadowFrame()) << "We only expect to visit compiled frame: "
+ << ArtMethod::PrettyMethod(visitor->GetMethod());
+ ArtMethod* method = visitor->GetMethod();
+ if (method == nullptr) {
+ // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment)
+ // so we can stop the visit.
+ DCHECK(!needs_deoptimization);
+ return false;
+ }
+ if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
+ // We found a compiled frame in the stack but instrumentation is set to interpret
+ // everything: we need to deoptimize.
+ needs_deoptimization = true;
+ return false;
+ }
+ if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
+ // We found a deoptimized method in the stack.
+ needs_deoptimization = true;
+ return false;
+ }
+ ShadowFrame* frame = visitor->GetThread()->FindDebuggerShadowFrame(visitor->GetFrameId());
+ if (frame != nullptr) {
+ // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
+ // deoptimize the stack to execute (and deallocate) this frame.
+ needs_deoptimization = true;
+ return false;
+ }
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ /* check_suspended */ true,
+ /* include_transitions */ true);
+ return needs_deoptimization;
}
// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 2236e61d75..a18cca4cb2 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -533,13 +533,7 @@ ALWAYS_INLINE ArtMethod* FindMethodToCall(uint32_t method_idx,
UNREACHABLE();
}
case kInterface: {
- size_t imt_index;
- InterpreterCache* tls_cache = self->GetInterpreterCache();
- if (UNLIKELY(!tls_cache->Get(resolved_method, &imt_index))) {
- imt_index = ImTable::GetImtIndex(resolved_method);
- tls_cache->Set(resolved_method, imt_index);
- }
- DCHECK_EQ(imt_index, ImTable::GetImtIndex(resolved_method));
+ size_t imt_index = resolved_method->GetImtIndex();
PointerSize pointer_size = class_linker->GetImagePointerSize();
ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
ArtMethod* imt_method = klass->GetImt(pointer_size)->Get(imt_index, pointer_size);
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 147249000f..b6adcf070d 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -2671,7 +2671,7 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_metho
DCHECK(!interface_method->IsRuntimeMethod());
// Look whether we have a match in the ImtConflictTable.
- uint32_t imt_index = ImTable::GetImtIndex(interface_method);
+ uint32_t imt_index = interface_method->GetImtIndex();
ArtMethod* conflict_method = imt->Get(imt_index, kRuntimePointerSize);
if (LIKELY(conflict_method->IsRuntimeMethod())) {
ImtConflictTable* current_table = conflict_method->GetImtConflictTable(kRuntimePointerSize);
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index b39628b1dc..e66a174587 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -162,9 +162,9 @@ ModUnionTable* ModUnionTableFactory::Create(
}
default: {
UNIMPLEMENTED(FATAL) << "Invalid type " << type;
+ UNREACHABLE();
}
}
- return nullptr;
}
TEST_F(ModUnionTableTest, TestCardCache) {
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index e11fa5c212..80e3394f86 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -184,34 +184,6 @@ void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
new_record_condition_.Broadcast(Thread::Current());
}
-class AllocRecordStackVisitor : public StackVisitor {
- public:
- AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- max_depth_(max_depth),
- trace_(trace_out) {}
-
- // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
- // annotalysis.
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- if (trace_->GetDepth() >= max_depth_) {
- return false;
- }
- ArtMethod* m = GetMethod();
- // m may be null if we have inlined methods of unresolved classes. b/27858645
- if (m != nullptr && !m->IsRuntimeMethod()) {
- m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
- }
- return true;
- }
-
- private:
- const size_t max_depth_;
- AllocRecordStackTrace* const trace_;
-};
-
void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
Thread* self = Thread::Current();
Heap* heap = Runtime::Current()->GetHeap();
@@ -268,11 +240,26 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self,
// Get stack trace outside of lock in case there are allocations during the stack walk.
// b/27858645.
AllocRecordStackTrace trace;
- AllocRecordStackVisitor visitor(self, max_stack_depth_, /*out*/ &trace);
{
StackHandleScope<1> hs(self);
auto obj_wrapper = hs.NewHandleWrapper(obj);
- visitor.WalkStack();
+
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (trace.GetDepth() >= max_stack_depth_) {
+ return false;
+ }
+ ArtMethod* m = stack_visitor->GetMethod();
+ // m may be null if we have inlined methods of unresolved classes. b/27858645
+ if (m != nullptr && !m->IsRuntimeMethod()) {
+ m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ trace.AddStackElement(AllocRecordStackTraceElement(m, stack_visitor->GetDexPc()));
+ }
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
}
MutexLock mu(self, *Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index c2a67bf9f6..9f98f6c7f0 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -113,22 +113,20 @@ class ImmuneSpacesTest : public CommonRuntimeTest {
ImageSection sections[ImageHeader::kSectionCount];
new (image_map.Begin()) ImageHeader(
/*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
- /*image_size=*/ image_map.Size(),
+ /*image_size=*/ image_size,
sections,
/*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
/*oat_checksum=*/ 0u,
// The oat file data in the header is always right after the image space.
/*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
- /*oat_data_begin=*/PointerToLowMemUInt32(oat_map.Begin()),
- /*oat_data_end=*/PointerToLowMemUInt32(oat_map.Begin() + oat_size),
- /*oat_file_end=*/PointerToLowMemUInt32(oat_map.Begin() + oat_size),
- /*boot_image_begin=*/0u,
- /*boot_image_size=*/0u,
- /*boot_oat_begin=*/0u,
- /*boot_oat_size=*/0u,
- /*pointer_size=*/sizeof(void*),
+ /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
+ /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
+ /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
+ /*boot_image_begin=*/ 0u,
+ /*boot_image_size=*/ 0u,
+ /*pointer_size=*/ sizeof(void*),
ImageHeader::kStorageModeUncompressed,
- /*data_size=*/0u);
+ /*data_size=*/ 0u);
return new DummyImageSpace(std::move(image_map),
std::move(live_bitmap),
std::move(oat_file),
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 0766999c02..bfb37463e5 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -383,20 +383,16 @@ class ImageSpace::Loader {
public:
static std::unique_ptr<ImageSpace> InitAppImage(const char* image_filename,
const char* image_location,
- bool validate_oat_file,
const OatFile* oat_file,
/*inout*/MemMap* image_reservation,
- /*inout*/MemMap* oat_reservation,
/*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
std::unique_ptr<ImageSpace> space = Init(image_filename,
image_location,
- validate_oat_file,
oat_file,
&logger,
image_reservation,
- oat_reservation,
error_msg);
if (space != nullptr) {
TimingLogger::ScopedTiming timing("RelocateImage", &logger);
@@ -438,11 +434,9 @@ class ImageSpace::Loader {
static std::unique_ptr<ImageSpace> Init(const char* image_filename,
const char* image_location,
- bool validate_oat_file,
const OatFile* oat_file,
TimingLogger* logger,
/*inout*/MemMap* image_reservation,
- /*inout*/MemMap* oat_reservation,
/*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(image_filename != nullptr);
@@ -479,8 +473,8 @@ class ImageSpace::Loader {
}
if (oat_file != nullptr) {
- // If we have an oat file, check the oat file checksum. The oat file is only non-null for the
- // app image case. Otherwise, we open the oat file after the image and check the checksum there.
+ // If we have an oat file (i.e. for app image), check the oat file checksum.
+ // Otherwise, we open the oat file after the image and check the checksum there.
const uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
const uint32_t image_oat_checksum = image_header->GetOatChecksum();
if (oat_checksum != image_oat_checksum) {
@@ -517,15 +511,13 @@ class ImageSpace::Loader {
return nullptr;
}
- MemMap map;
-
// GetImageBegin is the preferred address to map the image. If we manage to map the
// image at the image begin, the amount of fixup work required is minimized.
// If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
// avoid reading proc maps for a mapping failure and slowing everything down.
// For the boot image, we have already reserved the memory and we load the image
// into the `image_reservation`.
- map = LoadImageFile(
+ MemMap map = LoadImageFile(
image_filename,
image_location,
*image_header,
@@ -583,33 +575,7 @@ class ImageSpace::Loader {
std::move(map),
std::move(bitmap),
image_end));
-
- // VerifyImageAllocations() will be called later in Runtime::Init()
- // as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
- // and ArtField::java_lang_reflect_ArtField_, which are used from
- // Object::SizeOf() which VerifyImageAllocations() calls, are not
- // set yet at this point.
- if (oat_file == nullptr) {
- TimingLogger::ScopedTiming timing("OpenOatFile", logger);
- space->oat_file_ = OpenOatFile(*space, image_filename, oat_reservation, error_msg);
- if (space->oat_file_ == nullptr) {
- DCHECK(!error_msg->empty());
- return nullptr;
- }
- space->oat_file_non_owned_ = space->oat_file_.get();
- } else {
- space->oat_file_non_owned_ = oat_file;
- }
-
- if (validate_oat_file) {
- TimingLogger::ScopedTiming timing("ValidateOatFile", logger);
- CHECK(space->oat_file_ != nullptr);
- if (!ImageSpace::ValidateOatFile(*space->oat_file_, error_msg)) {
- DCHECK(!error_msg->empty());
- return nullptr;
- }
- }
-
+ space->oat_file_non_owned_ = oat_file;
return space;
}
@@ -700,11 +666,9 @@ class ImageSpace::Loader {
class FixupVisitor : public ValueObject {
public:
FixupVisitor(const RelocationRange& boot_image,
- const RelocationRange& boot_oat,
const RelocationRange& app_image,
const RelocationRange& app_oat)
: boot_image_(boot_image),
- boot_oat_(boot_oat),
app_image_(app_image),
app_oat_(app_oat) {}
@@ -727,8 +691,8 @@ class ImageSpace::Loader {
// Return the relocated address of a code pointer (contained by an oat file).
ALWAYS_INLINE const void* ForwardCode(const void* src) const {
const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
- if (boot_oat_.InSource(uint_src)) {
- return reinterpret_cast<const void*>(boot_oat_.ToDest(uint_src));
+ if (boot_image_.InSource(uint_src)) {
+ return reinterpret_cast<const void*>(boot_image_.ToDest(uint_src));
}
if (app_oat_.InSource(uint_src)) {
return reinterpret_cast<const void*>(app_oat_.ToDest(uint_src));
@@ -745,7 +709,6 @@ class ImageSpace::Loader {
protected:
// Source section.
const RelocationRange boot_image_;
- const RelocationRange boot_oat_;
const RelocationRange app_image_;
const RelocationRange app_oat_;
};
@@ -893,7 +856,7 @@ class ImageSpace::Loader {
// We want to use our own class loader and not the one in the image.
if (obj->IsClass<kVerifyNone>()) {
mirror::Class* as_klass = obj->AsClass<kVerifyNone>();
- FixupObjectAdapter visitor(boot_image_, boot_oat_, app_image_, app_oat_);
+ FixupObjectAdapter visitor(boot_image_, app_image_, app_oat_);
as_klass->FixupNativePointers<kVerifyNone>(as_klass, pointer_size_, visitor);
// Deal with the pointer arrays. Use the helper function since multiple classes can reference
// the same arrays.
@@ -1025,10 +988,8 @@ class ImageSpace::Loader {
*error_msg = "Can not relocate app image without boot oat file";
return false;
}
- const uint32_t boot_image_size = boot_image_end - boot_image_begin;
- const uint32_t boot_oat_size = boot_oat_end - boot_oat_begin;
+ const uint32_t boot_image_size = boot_oat_end - boot_image_begin;
const uint32_t image_header_boot_image_size = image_header.GetBootImageSize();
- const uint32_t image_header_boot_oat_size = image_header.GetBootOatSize();
if (boot_image_size != image_header_boot_image_size) {
*error_msg = StringPrintf("Boot image size %" PRIu64 " does not match expected size %"
PRIu64,
@@ -1036,20 +997,10 @@ class ImageSpace::Loader {
static_cast<uint64_t>(image_header_boot_image_size));
return false;
}
- if (boot_oat_size != image_header_boot_oat_size) {
- *error_msg = StringPrintf("Boot oat size %" PRIu64 " does not match expected size %"
- PRIu64,
- static_cast<uint64_t>(boot_oat_size),
- static_cast<uint64_t>(image_header_boot_oat_size));
- return false;
- }
TimingLogger logger(__FUNCTION__, true, false);
RelocationRange boot_image(image_header.GetBootImageBegin(),
boot_image_begin,
boot_image_size);
- RelocationRange boot_oat(image_header.GetBootOatBegin(),
- boot_oat_begin,
- boot_oat_size);
RelocationRange app_image(reinterpret_cast<uintptr_t>(image_header.GetImageBegin()),
reinterpret_cast<uintptr_t>(target_base),
image_header.GetImageSize());
@@ -1061,11 +1012,9 @@ class ImageSpace::Loader {
VLOG(image) << "App image " << app_image;
VLOG(image) << "App oat " << app_oat;
VLOG(image) << "Boot image " << boot_image;
- VLOG(image) << "Boot oat " << boot_oat;
- // True if we need to fixup any heap pointers, otherwise only code pointers.
+ // True if we need to fixup any heap pointers.
const bool fixup_image = boot_image.Delta() != 0 || app_image.Delta() != 0;
- const bool fixup_code = boot_oat.Delta() != 0 || app_oat.Delta() != 0;
- if (!fixup_image && !fixup_code) {
+ if (!fixup_image) {
// Nothing to fix up.
return true;
}
@@ -1074,7 +1023,7 @@ class ImageSpace::Loader {
const ImageSection& objects_section = image_header.GetObjectsSection();
uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
- FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
+ FixupObjectAdapter fixup_adapter(boot_image, app_image, app_oat);
if (fixup_image) {
// Two pass approach, fix up all classes first, then fix up non class-objects.
// The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
@@ -1085,7 +1034,6 @@ class ImageSpace::Loader {
FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(),
pointer_size,
boot_image,
- boot_oat,
app_image,
app_oat);
TimingLogger::ScopedTiming timing("Fixup classes", &logger);
@@ -1191,7 +1139,6 @@ class ImageSpace::Loader {
FixupArtMethodVisitor method_visitor(fixup_image,
pointer_size,
boot_image,
- boot_oat,
app_image,
app_oat);
image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size);
@@ -1200,7 +1147,7 @@ class ImageSpace::Loader {
{
// Only touches objects in the app image, no need for mutator lock.
TimingLogger::ScopedTiming timing("Fixup fields", &logger);
- FixupArtFieldVisitor field_visitor(boot_image, boot_oat, app_image, app_oat);
+ FixupArtFieldVisitor field_visitor(boot_image, app_image, app_oat);
image_header.VisitPackedArtFields(&field_visitor, target_base);
}
{
@@ -1222,7 +1169,7 @@ class ImageSpace::Loader {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
ClassTable temp_table;
temp_table.ReadFromMemory(target_base + class_table_section.Offset());
- FixupRootVisitor root_visitor(boot_image, boot_oat, app_image, app_oat);
+ FixupRootVisitor root_visitor(boot_image, app_image, app_oat);
temp_table.VisitRoots(root_visitor);
}
// Fix up the intern table.
@@ -1234,7 +1181,7 @@ class ImageSpace::Loader {
InternTable temp_intern_table;
// Note that we require that ReadFromMemory does not make an internal copy of the elements
// so that the VisitRoots() will update the memory directly rather than the copies.
- FixupRootVisitor root_visitor(boot_image, boot_oat, app_image, app_oat);
+ FixupRootVisitor root_visitor(boot_image, app_image, app_oat);
temp_intern_table.AddTableFromMemory(target_base + intern_table_section.Offset(),
[&](InternTable::UnorderedSet& strings)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1249,49 +1196,6 @@ class ImageSpace::Loader {
}
return true;
}
-
- static std::unique_ptr<OatFile> OpenOatFile(const ImageSpace& image,
- const char* image_path,
- /*inout*/MemMap* oat_reservation,
- std::string* error_msg) {
- const ImageHeader& image_header = image.GetImageHeader();
- std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(image_path);
-
- CHECK(image_header.GetOatDataBegin() != nullptr);
-
- uint8_t* oat_data_begin = image_header.GetOatDataBegin();
- if (oat_reservation != nullptr) {
- oat_data_begin += oat_reservation->Begin() - image_header.GetOatFileBegin();
- }
- std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
- oat_filename,
- oat_filename,
- !Runtime::Current()->IsAotCompiler(),
- /*low_4gb=*/ false,
- /*abs_dex_location=*/ nullptr,
- oat_reservation,
- error_msg));
- if (oat_file == nullptr) {
- *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
- oat_filename.c_str(),
- image.GetName(),
- error_msg->c_str());
- return nullptr;
- }
- CHECK(oat_data_begin == oat_file->Begin());
- uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
- uint32_t image_oat_checksum = image_header.GetOatChecksum();
- if (oat_checksum != image_oat_checksum) {
- *error_msg = StringPrintf("Failed to match oat file checksum 0x%x to expected oat checksum 0x%x"
- " in image %s",
- oat_checksum,
- image_oat_checksum,
- image.GetName());
- return nullptr;
- }
-
- return oat_file;
- }
};
class ImageSpace::BootImageLoader {
@@ -1351,26 +1255,22 @@ class ImageSpace::BootImageLoader {
}
uint32_t image_start;
uint32_t image_end;
- uint32_t oat_end;
- if (!GetBootImageAddressRange(filename, &image_start, &image_end, &oat_end, error_msg)) {
+ if (!GetBootImageAddressRange(filename, &image_start, &image_end, error_msg)) {
return false;
}
if (locations.size() > 1u) {
std::string last_filename = GetSystemImageFilename(locations.back().c_str(), image_isa_);
uint32_t dummy;
- if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, &oat_end, error_msg)) {
+ if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, error_msg)) {
return false;
}
}
MemMap image_reservation;
- MemMap oat_reservation;
MemMap local_extra_reservation;
- if (!ReserveBootImageMemory(image_start,
- image_end,
- oat_end,
+ if (!ReserveBootImageMemory(/*reservation_size=*/ image_end - image_start,
+ image_start,
extra_reservation_size,
&image_reservation,
- &oat_reservation,
&local_extra_reservation,
error_msg)) {
return false;
@@ -1380,28 +1280,29 @@ class ImageSpace::BootImageLoader {
spaces.reserve(locations.size());
for (const std::string& location : locations) {
filename = GetSystemImageFilename(location.c_str(), image_isa_);
- spaces.push_back(Load(location,
- filename,
- /*validate_oat_file=*/ false,
- &logger,
- &image_reservation,
- &oat_reservation,
- error_msg));
+ spaces.push_back(Load(location, filename, &logger, &image_reservation, error_msg));
if (spaces.back() == nullptr) {
return false;
}
}
- if (!CheckReservationsExhausted(image_reservation, oat_reservation, error_msg)) {
+ for (std::unique_ptr<ImageSpace>& space : spaces) {
+ static constexpr bool kValidateOatFile = false;
+ if (!OpenOatFile(space.get(), kValidateOatFile, &logger, &image_reservation, error_msg)) {
+ return false;
+ }
+ }
+ if (!CheckReservationExhausted(image_reservation, error_msg)) {
return false;
}
MaybeRelocateSpaces(spaces, &logger);
InitRuntimeMethods(spaces);
- *extra_reservation = std::move(local_extra_reservation);
- VLOG(image) << "ImageSpace::BootImageLoader::InitFromDalvikCache exiting " << *spaces.front();
boot_image_spaces->swap(spaces);
+ *extra_reservation = std::move(local_extra_reservation);
if (VLOG_IS_ON(image)) {
+ LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromSystem exiting "
+ << boot_image_spaces->front();
logger.Dump(LOG_STREAM(INFO));
}
return true;
@@ -1421,8 +1322,7 @@ class ImageSpace::BootImageLoader {
}
uint32_t image_start;
uint32_t image_end;
- uint32_t oat_end;
- if (!GetBootImageAddressRange(cache_filename_, &image_start, &image_end, &oat_end, error_msg)) {
+ if (!GetBootImageAddressRange(cache_filename_, &image_start, &image_end, error_msg)) {
return false;
}
if (locations.size() > 1u) {
@@ -1434,19 +1334,16 @@ class ImageSpace::BootImageLoader {
return false;
}
uint32_t dummy;
- if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, &oat_end, error_msg)) {
+ if (!GetBootImageAddressRange(last_filename, &dummy, &image_end, error_msg)) {
return false;
}
}
MemMap image_reservation;
- MemMap oat_reservation;
MemMap local_extra_reservation;
- if (!ReserveBootImageMemory(image_start,
- image_end,
- oat_end,
+ if (!ReserveBootImageMemory(/*reservation_size=*/ image_end - image_start,
+ image_start,
extra_reservation_size,
&image_reservation,
- &oat_reservation,
&local_extra_reservation,
error_msg)) {
return false;
@@ -1459,28 +1356,28 @@ class ImageSpace::BootImageLoader {
if (!GetDalvikCacheFilename(location.c_str(), dalvik_cache_.c_str(), &filename, error_msg)) {
return false;
}
- spaces.push_back(Load(location,
- filename,
- validate_oat_file,
- &logger,
- &image_reservation,
- &oat_reservation,
- error_msg));
+ spaces.push_back(Load(location, filename, &logger, &image_reservation, error_msg));
if (spaces.back() == nullptr) {
return false;
}
}
- if (!CheckReservationsExhausted(image_reservation, oat_reservation, error_msg)) {
+ for (std::unique_ptr<ImageSpace>& space : spaces) {
+ if (!OpenOatFile(space.get(), validate_oat_file, &logger, &image_reservation, error_msg)) {
+ return false;
+ }
+ }
+ if (!CheckReservationExhausted(image_reservation, error_msg)) {
return false;
}
MaybeRelocateSpaces(spaces, &logger);
InitRuntimeMethods(spaces);
- *extra_reservation = std::move(local_extra_reservation);
boot_image_spaces->swap(spaces);
+ *extra_reservation = std::move(local_extra_reservation);
- VLOG(image) << "ImageSpace::BootImageLoader::InitFromDalvikCache exiting " << *spaces.front();
if (VLOG_IS_ON(image)) {
+ LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromDalvikCache exiting "
+ << boot_image_spaces->front();
logger.Dump(LOG_STREAM(INFO));
}
return true;
@@ -2013,10 +1910,8 @@ class ImageSpace::BootImageLoader {
std::unique_ptr<ImageSpace> Load(const std::string& image_location,
const std::string& image_filename,
- bool validate_oat_file,
TimingLogger* logger,
/*inout*/MemMap* image_reservation,
- /*inout*/MemMap* oat_reservation,
/*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Should this be a RDWR lock? This is only a defensive measure, as at
@@ -2045,14 +1940,80 @@ class ImageSpace::BootImageLoader {
// file name.
return Loader::Init(image_filename.c_str(),
image_location.c_str(),
- validate_oat_file,
/*oat_file=*/ nullptr,
logger,
image_reservation,
- oat_reservation,
error_msg);
}
+ bool OpenOatFile(ImageSpace* space,
+ bool validate_oat_file,
+ TimingLogger* logger,
+ /*inout*/MemMap* image_reservation,
+ /*out*/std::string* error_msg) {
+ // VerifyImageAllocations() will be called later in Runtime::Init()
+ // as some class roots like ArtMethod::java_lang_reflect_ArtMethod_
+ // and ArtField::java_lang_reflect_ArtField_, which are used from
+ // Object::SizeOf() which VerifyImageAllocations() calls, are not
+ // set yet at this point.
+ DCHECK(image_reservation != nullptr);
+ std::unique_ptr<OatFile> oat_file;
+ {
+ TimingLogger::ScopedTiming timing("OpenOatFile", logger);
+ std::string oat_filename =
+ ImageHeader::GetOatLocationFromImageLocation(space->GetImageFilename());
+
+ oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
+ oat_filename,
+ oat_filename,
+ !Runtime::Current()->IsAotCompiler(),
+ /*low_4gb=*/ false,
+ /*abs_dex_location=*/ nullptr,
+ image_reservation,
+ error_msg));
+ if (oat_file == nullptr) {
+ *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
+ oat_filename.c_str(),
+ space->GetName(),
+ error_msg->c_str());
+ return false;
+ }
+ const ImageHeader& image_header = space->GetImageHeader();
+ uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
+ uint32_t image_oat_checksum = image_header.GetOatChecksum();
+ if (oat_checksum != image_oat_checksum) {
+ *error_msg = StringPrintf("Failed to match oat file checksum 0x%x to expected oat checksum"
+ " 0x%x in image %s",
+ oat_checksum,
+ image_oat_checksum,
+ space->GetName());
+ return false;
+ }
+ ptrdiff_t relocation_diff = space->Begin() - image_header.GetImageBegin();
+ CHECK(image_header.GetOatDataBegin() != nullptr);
+ uint8_t* oat_data_begin = image_header.GetOatDataBegin() + relocation_diff;
+ if (oat_file->Begin() != oat_data_begin) {
+ *error_msg = StringPrintf("Oat file '%s' referenced from image %s has unexpected begin"
+ " %p v. %p",
+ oat_filename.c_str(),
+ space->GetName(),
+ oat_file->Begin(),
+ oat_data_begin);
+ return false;
+ }
+ }
+ if (validate_oat_file) {
+ TimingLogger::ScopedTiming timing("ValidateOatFile", logger);
+ if (!ImageSpace::ValidateOatFile(*oat_file, error_msg)) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ }
+ space->oat_file_ = std::move(oat_file);
+ space->oat_file_non_owned_ = space->oat_file_.get();
+ return true;
+ }
+
// Extract boot class path from oat file associated with `image_filename`
// and list all associated image locations.
static bool GetBootClassPathImageLocations(const std::string& image_location,
@@ -2087,7 +2048,6 @@ class ImageSpace::BootImageLoader {
bool GetBootImageAddressRange(const std::string& filename,
/*out*/uint32_t* start,
/*out*/uint32_t* end,
- /*out*/uint32_t* oat_end,
/*out*/std::string* error_msg) {
ImageHeader system_hdr;
if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
@@ -2096,22 +2056,19 @@ class ImageSpace::BootImageLoader {
}
*start = reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin());
CHECK_ALIGNED(*start, kPageSize);
- *end = RoundUp(*start + system_hdr.GetImageSize(), kPageSize);
- *oat_end = RoundUp(reinterpret_cast32<uint32_t>(system_hdr.GetOatFileEnd()), kPageSize);
+ *end = RoundUp(reinterpret_cast32<uint32_t>(system_hdr.GetOatFileEnd()), kPageSize);
return true;
}
- bool ReserveBootImageMemory(uint32_t image_start,
- uint32_t image_end,
- uint32_t oat_end,
+ bool ReserveBootImageMemory(uint32_t reservation_size,
+ uint32_t image_start,
size_t extra_reservation_size,
/*out*/MemMap* image_reservation,
- /*out*/MemMap* oat_reservation,
/*out*/MemMap* extra_reservation,
/*out*/std::string* error_msg) {
DCHECK(!image_reservation->IsValid());
- size_t total_size =
- dchecked_integral_cast<size_t>(oat_end - image_start) + extra_reservation_size;
+ DCHECK_LT(extra_reservation_size, std::numeric_limits<uint32_t>::max() - reservation_size);
+ size_t total_size = reservation_size + extra_reservation_size;
bool relocate = Runtime::Current()->ShouldRelocate();
// If relocating, choose a random address for ALSR.
uint32_t addr = relocate ? ART_BASE_ADDRESS + ChooseRelocationOffsetDelta() : image_start;
@@ -2140,37 +2097,17 @@ class ImageSpace::BootImageLoader {
return false;
}
}
- uint32_t diff = reinterpret_cast32<uint32_t>(image_reservation->Begin()) - image_start;
- image_start += diff;
- image_end += diff;
- oat_end += diff;
- DCHECK(!oat_reservation->IsValid());
- *oat_reservation = image_reservation->RemapAtEnd(reinterpret_cast32<uint8_t*>(image_end),
- "Boot image oat reservation",
- PROT_NONE,
- error_msg);
- if (!oat_reservation->IsValid()) {
- return false;
- }
return true;
}
- bool CheckReservationsExhausted(const MemMap& image_reservation,
- const MemMap& oat_reservation,
- /*out*/std::string* error_msg) {
+ bool CheckReservationExhausted(const MemMap& image_reservation, /*out*/std::string* error_msg) {
if (image_reservation.IsValid()) {
*error_msg = StringPrintf("Excessive image reservation after loading boot image: %p-%p",
image_reservation.Begin(),
image_reservation.End());
return false;
}
- if (oat_reservation.IsValid()) {
- *error_msg = StringPrintf("Excessive oat reservation after loading boot image: %p-%p",
- image_reservation.Begin(),
- image_reservation.End());
- return false;
- }
return true;
}
@@ -2374,12 +2311,11 @@ ImageSpace::~ImageSpace() {
std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(const char* image,
const OatFile* oat_file,
std::string* error_msg) {
+ // Note: The oat file has already been validated.
return Loader::InitAppImage(image,
image,
- /*validate_oat_file=*/ false,
oat_file,
/*image_reservation=*/ nullptr,
- /*oat_reservation=*/ nullptr,
error_msg);
}
diff --git a/runtime/hidden_api.cc b/runtime/hidden_api.cc
index 188c5f353b..d3df7fd38d 100644
--- a/runtime/hidden_api.cc
+++ b/runtime/hidden_api.cc
@@ -21,7 +21,6 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/dumpable.h"
-#include "base/sdk_version.h"
#include "dex/class_accessor-inl.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -76,21 +75,6 @@ enum AccessContextFlags {
kAccessDenied = 1 << 1,
};
-static SdkVersion GetMaxAllowedSdkVersionForApiList(ApiList api_list) {
- switch (api_list) {
- case ApiList::kWhitelist:
- case ApiList::kLightGreylist:
- return SdkVersion::kMax;
- case ApiList::kDarkGreylist:
- return SdkVersion::kO_MR1;
- case ApiList::kBlacklist:
- return SdkVersion::kMin;
- case ApiList::kNoList:
- LOG(FATAL) << "Unexpected value";
- UNREACHABLE();
- }
-}
-
MemberSignature::MemberSignature(ArtField* field) {
class_name_ = field->GetDeclaringClass()->GetDescriptor(&tmp_);
member_name_ = field->GetName();
@@ -264,7 +248,7 @@ uint32_t GetDexFlags(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_) {
}
uint32_t flags = kInvalidDexFlags;
- DCHECK(!AreValidFlags(flags));
+ DCHECK(!AreValidDexFlags(flags));
ClassAccessor accessor(declaring_class->GetDexFile(),
*class_def,
@@ -277,7 +261,7 @@ uint32_t GetDexFlags(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_) {
accessor.VisitFields(fn_visit, fn_visit);
CHECK_NE(flags, kInvalidDexFlags) << "Could not find flags for field " << field->PrettyField();
- DCHECK(AreValidFlags(flags));
+ DCHECK(AreValidDexFlags(flags));
return flags;
}
@@ -294,7 +278,7 @@ uint32_t GetDexFlags(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
}
uint32_t flags = kInvalidDexFlags;
- DCHECK(!AreValidFlags(flags));
+ DCHECK(!AreValidDexFlags(flags));
ClassAccessor accessor(declaring_class->GetDexFile(),
*class_def,
@@ -307,7 +291,7 @@ uint32_t GetDexFlags(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
accessor.VisitMethods(fn_visit, fn_visit);
CHECK_NE(flags, kInvalidDexFlags) << "Could not find flags for method " << method->PrettyMethod();
- DCHECK(AreValidFlags(flags));
+ DCHECK(AreValidDexFlags(flags));
return flags;
}
@@ -323,7 +307,7 @@ bool ShouldDenyAccessToMemberImpl(T* member,
const bool deny_access =
(policy == EnforcementPolicy::kEnabled) &&
IsSdkVersionSetAndMoreThan(runtime->GetTargetSdkVersion(),
- GetMaxAllowedSdkVersionForApiList(api_list));
+ api_list.GetMaxAllowedSdkVersion());
MemberSignature member_signature(member);
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 32bae1127b..eea58e9880 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -19,8 +19,8 @@
#include "art_field.h"
#include "art_method.h"
+#include "base/hiddenapi_flags.h"
#include "base/mutex.h"
-#include "dex/hidden_api_access_flags.h"
#include "intrinsics_enum.h"
#include "mirror/class-inl.h"
#include "reflection.h"
@@ -177,10 +177,10 @@ ALWAYS_INLINE inline uint32_t CreateRuntimeFlags(const ClassAccessor::BaseItem&
uint32_t runtime_flags = 0u;
uint32_t dex_flags = member.GetHiddenapiFlags();
- DCHECK(AreValidFlags(dex_flags));
+ DCHECK(AreValidDexFlags(dex_flags));
- ApiList api_list = static_cast<hiddenapi::ApiList>(dex_flags);
- if (api_list == ApiList::kWhitelist) {
+ ApiList api_list = ApiList::FromDexFlags(dex_flags);
+ if (api_list == ApiList::Whitelist()) {
runtime_flags |= kAccPublicApi;
}
@@ -316,7 +316,7 @@ inline bool ShouldDenyAccessToMember(T* member,
// Decode hidden API access flags from the dex file.
// This is an O(N) operation scaling with the number of fields/methods
// in the class. Only do this on slow path and only do it once.
- ApiList api_list = static_cast<hiddenapi::ApiList>(detail::GetDexFlags(member));
+ ApiList api_list = ApiList::FromDexFlags(detail::GetDexFlags(member));
// Member is hidden and caller is not exempted. Enter slow path.
return detail::ShouldDenyAccessToMemberImpl(member, api_list, access_method);
diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc
index 314d878c66..520dc6d935 100644
--- a/runtime/hidden_api_test.cc
+++ b/runtime/hidden_api_test.cc
@@ -101,30 +101,32 @@ TEST_F(HiddenApiTest, CheckGetActionFromRuntimeFlags) {
ScopedObjectAccess soa(self_);
runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kDisabled);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kWhitelist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kLightGreylist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kDarkGreylist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kBlacklist), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), false);
runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kJustWarn);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kWhitelist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kLightGreylist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kDarkGreylist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kBlacklist), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), false);
runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
- runtime_->SetTargetSdkVersion(static_cast<uint32_t>(SdkVersion::kO_MR1));
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kWhitelist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kLightGreylist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kDarkGreylist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kBlacklist), true);
+ runtime_->SetTargetSdkVersion(
+ static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()));
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
- runtime_->SetTargetSdkVersion(static_cast<uint32_t>(SdkVersion::kP));
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kWhitelist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kLightGreylist), false);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kDarkGreylist), true);
- ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::kBlacklist), true);
+ runtime_->SetTargetSdkVersion(
+ static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()) + 1);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), true);
+ ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
}
TEST_F(HiddenApiTest, CheckMembersRead) {
diff --git a/runtime/image.cc b/runtime/image.cc
index f4c3fea8e1..f50c39c3d5 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '8', '\0' }; // Image checksums.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '0', '\0' }; // Store ImtIndex.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -39,8 +39,6 @@ ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t oat_file_end,
uint32_t boot_image_begin,
uint32_t boot_image_size,
- uint32_t boot_oat_begin,
- uint32_t boot_oat_size,
uint32_t pointer_size,
StorageMode storage_mode,
size_t data_size)
@@ -54,8 +52,6 @@ ImageHeader::ImageHeader(uint32_t image_begin,
oat_file_end_(oat_file_end),
boot_image_begin_(boot_image_begin),
boot_image_size_(boot_image_size),
- boot_oat_begin_(boot_oat_begin),
- boot_oat_size_(boot_oat_size),
image_roots_(image_roots),
pointer_size_(pointer_size),
storage_mode_(storage_mode),
diff --git a/runtime/image.h b/runtime/image.h
index 5245cea091..f33b9b2a2e 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -93,23 +93,7 @@ class PACKED(4) ImageHeader {
};
static constexpr StorageMode kDefaultStorageMode = kStorageModeUncompressed;
- ImageHeader()
- : image_begin_(0U),
- image_size_(0U),
- image_checksum_(0u),
- oat_checksum_(0U),
- oat_file_begin_(0U),
- oat_data_begin_(0U),
- oat_data_end_(0U),
- oat_file_end_(0U),
- boot_image_begin_(0U),
- boot_image_size_(0U),
- boot_oat_begin_(0U),
- boot_oat_size_(0U),
- image_roots_(0U),
- pointer_size_(0U),
- storage_mode_(kDefaultStorageMode),
- data_size_(0) {}
+ ImageHeader() {}
ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -122,8 +106,6 @@ class PACKED(4) ImageHeader {
uint32_t oat_file_end,
uint32_t boot_image_begin,
uint32_t boot_image_size,
- uint32_t boot_oat_begin,
- uint32_t boot_oat_size,
uint32_t pointer_size,
StorageMode storage_mode,
size_t data_size);
@@ -322,14 +304,6 @@ class PACKED(4) ImageHeader {
return boot_image_size_;
}
- uint32_t GetBootOatBegin() const {
- return boot_oat_begin_;
- }
-
- uint32_t GetBootOatSize() const {
- return boot_oat_size_;
- }
-
StorageMode GetStorageMode() const {
return storage_mode_;
}
@@ -390,43 +364,39 @@ class PACKED(4) ImageHeader {
uint8_t version_[4];
// Required base address for mapping the image.
- uint32_t image_begin_;
+ uint32_t image_begin_ = 0u;
// Image size, not page aligned.
- uint32_t image_size_;
+ uint32_t image_size_ = 0u;
// Image file checksum (calculated with the checksum field set to 0).
- uint32_t image_checksum_;
+ uint32_t image_checksum_ = 0u;
// Checksum of the oat file we link to for load time sanity check.
- uint32_t oat_checksum_;
+ uint32_t oat_checksum_ = 0u;
// Start address for oat file. Will be before oat_data_begin_ for .so files.
- uint32_t oat_file_begin_;
+ uint32_t oat_file_begin_ = 0u;
// Required oat address expected by image Method::GetCode() pointers.
- uint32_t oat_data_begin_;
+ uint32_t oat_data_begin_ = 0u;
// End of oat data address range for this image file.
- uint32_t oat_data_end_;
+ uint32_t oat_data_end_ = 0u;
// End of oat file address range. will be after oat_data_end_ for
// .so files. Used for positioning a following alloc spaces.
- uint32_t oat_file_end_;
+ uint32_t oat_file_end_ = 0u;
// Boot image begin and end (app image headers only).
- uint32_t boot_image_begin_;
- uint32_t boot_image_size_;
-
- // Boot oat begin and end (app image headers only).
- uint32_t boot_oat_begin_;
- uint32_t boot_oat_size_;
+ uint32_t boot_image_begin_ = 0u;
+ uint32_t boot_image_size_ = 0u; // Includes heap (*.art) and code (.oat).
// Absolute address of an Object[] of objects needed to reinitialize from an image.
- uint32_t image_roots_;
+ uint32_t image_roots_ = 0u;
// Pointer size, this affects the size of the ArtMethods.
- uint32_t pointer_size_;
+ uint32_t pointer_size_ = 0u;
// Image section sizes/offsets correspond to the uncompressed form.
ImageSection sections_[kSectionCount];
@@ -435,11 +405,11 @@ class PACKED(4) ImageHeader {
uint64_t image_methods_[kImageMethodsCount];
// Storage method for the image, the image may be compressed.
- StorageMode storage_mode_;
+ StorageMode storage_mode_ = kDefaultStorageMode;
// Data size for the image data excluding the bitmap and the header. For compressed images, this
// is the compressed size in the file.
- uint32_t data_size_;
+ uint32_t data_size_ = 0u;
friend class linker::ImageWriter;
};
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index cbcaaef260..12f1522386 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -18,6 +18,8 @@
#include <sstream>
+#include <android-base/logging.h>
+
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
@@ -1355,65 +1357,66 @@ DeoptimizationMethodType Instrumentation::GetDeoptimizationMethodType(ArtMethod*
}
// Try to get the shorty of a runtime method if it's an invocation stub.
-struct RuntimeMethodShortyVisitor : public StackVisitor {
- explicit RuntimeMethodShortyVisitor(Thread* thread)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- shorty('V') {}
-
- static uint16_t GetMethodIndexOfInvoke(ArtMethod* caller,
- const Instruction& inst,
- uint32_t dex_pc)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- switch (inst.Opcode()) {
- case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
- case Instruction::INVOKE_VIRTUAL_QUICK: {
- uint16_t method_idx = caller->GetIndexFromQuickening(dex_pc);
- CHECK_NE(method_idx, DexFile::kDexNoIndex16);
- return method_idx;
- }
- default: {
- return inst.VRegB();
- }
- }
- }
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m == nullptr || m->IsRuntimeMethod()) {
- return true;
- }
- // The first Java method.
- if (m->IsNative()) {
- // Use JNI method's shorty for the jni stub.
- shorty = m->GetShorty()[0];
- } else if (m->IsProxyMethod()) {
- // Proxy method just invokes its proxied method via
- // art_quick_proxy_invoke_handler.
- shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
- } else {
- const Instruction& instr = m->DexInstructions().InstructionAt(GetDexPc());
- if (instr.IsInvoke()) {
- uint16_t method_index = GetMethodIndexOfInvoke(m, instr, GetDexPc());
- const DexFile* dex_file = m->GetDexFile();
- if (interpreter::IsStringInit(dex_file, method_index)) {
- // Invoking string init constructor is turned into invoking
- // StringFactory.newStringFromChars() which returns a string.
- shorty = 'L';
- } else {
- shorty = dex_file->GetMethodShorty(method_index)[0];
+static char GetRuntimeMethodShorty(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
+ char shorty = 'V';
+ StackVisitor::WalkStack(
+ [&shorty](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ if (m == nullptr || m->IsRuntimeMethod()) {
+ return true;
}
- } else {
- // It could be that a non-invoke opcode invokes a stub, which in turn
- // invokes Java code. In such cases, we should never expect a return
- // value from the stub.
- }
- }
- // Stop stack walking since we've seen a Java frame.
- return false;
- }
+ // The first Java method.
+ if (m->IsNative()) {
+ // Use JNI method's shorty for the jni stub.
+ shorty = m->GetShorty()[0];
+ } else if (m->IsProxyMethod()) {
+ // Proxy method just invokes its proxied method via
+ // art_quick_proxy_invoke_handler.
+ shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
+ } else {
+ const Instruction& instr = m->DexInstructions().InstructionAt(stack_visitor->GetDexPc());
+ if (instr.IsInvoke()) {
+ auto get_method_index_fn = [](ArtMethod* caller,
+ const Instruction& inst,
+ uint32_t dex_pc)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ switch (inst.Opcode()) {
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+ case Instruction::INVOKE_VIRTUAL_QUICK: {
+ uint16_t method_idx = caller->GetIndexFromQuickening(dex_pc);
+ CHECK_NE(method_idx, DexFile::kDexNoIndex16);
+ return method_idx;
+ }
+ default: {
+ return static_cast<uint16_t>(inst.VRegB());
+ }
+ }
+ };
+
+ uint16_t method_index = get_method_index_fn(m, instr, stack_visitor->GetDexPc());
+ const DexFile* dex_file = m->GetDexFile();
+ if (interpreter::IsStringInit(dex_file, method_index)) {
+ // Invoking string init constructor is turned into invoking
+ // StringFactory.newStringFromChars() which returns a string.
+ shorty = 'L';
+ } else {
+ shorty = dex_file->GetMethodShorty(method_index)[0];
+ }
- char shorty;
-};
+ } else {
+ // It could be that a non-invoke opcode invokes a stub, which in turn
+ // invokes Java code. In such cases, we should never expect a return
+ // value from the stub.
+ }
+ }
+ // Stop stack walking since we've seen a Java frame.
+ return false;
+ },
+ thread,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return shorty;
+}
TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
uintptr_t* return_pc,
@@ -1447,9 +1450,7 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
// for clinit, we need to pass return results to the caller.
// We need the correct shorty to decide whether we need to pass the return
// result for deoptimization below.
- RuntimeMethodShortyVisitor visitor(self);
- visitor.WalkStack();
- return_shorty = visitor.shorty;
+ return_shorty = GetRuntimeMethodShorty(self);
} else {
// Some runtime methods such as allocations, unresolved field getters, etc.
// have return value. We don't need to set return_value since MethodExitEvent()
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
index 355058f4f6..003ea6c8d3 100644
--- a/runtime/interpreter/interpreter_cache.h
+++ b/runtime/interpreter/interpreter_cache.h
@@ -38,7 +38,6 @@ class Thread;
// iget/iput: The field offset. The field must be non-volatile.
// sget/sput: The ArtField* pointer. The field must be non-volitile.
// invoke: The ArtMethod* pointer (before vtable indirection, etc).
-// ArtMethod*: The ImtIndex of the method.
//
// We ensure consistency of the cache by clearing it
// whenever any dex file is unloaded.
diff --git a/runtime/interpreter/mterp/arm/arithmetic.S b/runtime/interpreter/mterp/arm/arithmetic.S
index 7a373c7e3a..a6ba454882 100644
--- a/runtime/interpreter/mterp/arm/arithmetic.S
+++ b/runtime/interpreter/mterp/arm/arithmetic.S
@@ -157,8 +157,8 @@
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r3 @ r2/r3<- vCC/vCC+1
.if $chkzero
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
@@ -168,7 +168,7 @@
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ SET_VREG_WIDE_BY_ADDR $result0,$result1,r9 @ vAA/vAA+1<- $result0/$result1
GOTO_OPCODE ip @ jump to next instruction
/* 14-17 instructions */
@@ -192,8 +192,8 @@
ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
.if $chkzero
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
@@ -203,7 +203,7 @@
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
+ SET_VREG_WIDE_BY_ADDR $result0,$result1,r9 @ vAA/vAA+1<- $result0/$result1
GOTO_OPCODE ip @ jump to next instruction
/* 12-15 instructions */
@@ -243,7 +243,7 @@
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
- ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vB/vB+1
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ r0<- op, r0-r3 changed
@@ -265,13 +265,13 @@
ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- vAA
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vAA
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ r0/r1<- op, r2-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* 10-11 instructions */
@@ -293,7 +293,7 @@
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ r0<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vA/vA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
/* 9-10 instructions */
@@ -345,8 +345,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r3 @ r2/r3<- vCC/vCC+1
cmp r0, r2
sbcs ip, r1, r3 @ Sets correct CCs for checking LT (but not EQ/NE)
mov ip, #0
@@ -541,8 +541,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
- ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
- ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r3 @ r2/r3<- vCC/vCC+1
mul ip, r2, r1 @ ip<- ZxW
umull r1, lr, r2, r0 @ r1/lr <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
@@ -552,7 +552,7 @@
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2 } @ vAA/vAA+1<- r1/r2
+ SET_VREG_WIDE_BY_ADDR r1, r2, r0 @ vAA/vAA+1<- r1/r2
GOTO_OPCODE ip @ jump to next instruction
%def op_mul_long_2addr():
@@ -569,8 +569,8 @@
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR rINST, r9 @ rINST<- &fp[A]
- ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
- ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r1 @ r2/r3<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, rINST @ r0/r1<- vAA/vAA+1
mul ip, r2, r1 @ ip<- ZxW
umull r1, lr, r2, r0 @ r1/lr <- ZxX
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
@@ -578,7 +578,7 @@
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
add r2, r2, lr @ r2<- r2 + low(ZxW + (YxX))
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r0, {r1-r2} @ vAA/vAA+1<- r1/r2
+ SET_VREG_WIDE_BY_ADDR r1, r2, r0 @ vAA/vAA+1<- r1/r2
GOTO_OPCODE ip @ jump to next instruction
%def op_neg_int():
@@ -781,7 +781,7 @@
mov r0, r0, lsr #8 @ r0<- CC
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r2<- r2 & 0x3f
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
@@ -793,7 +793,7 @@
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r0, r0, asl r2 @ r0<- r0 << r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_shl_long_2addr():
@@ -808,7 +808,7 @@
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
mov r1, r1, asl r2 @ r1<- r1 << r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 << (32-r2))
@@ -817,7 +817,7 @@
movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
mov r0, r0, asl r2 @ r0<- r0 << r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_shr_int():
@@ -843,7 +843,7 @@
mov r0, r0, lsr #8 @ r0<- CC
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r0<- r0 & 0x3f
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
@@ -855,7 +855,7 @@
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r1, r1, asr r2 @ r1<- r1 >> r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_shr_long_2addr():
@@ -870,7 +870,7 @@
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r2 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
@@ -879,7 +879,7 @@
movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
mov r1, r1, asr r2 @ r1<- r1 >> r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_sub_int():
@@ -917,7 +917,7 @@
mov r0, r0, lsr #8 @ r0<- CC
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BB]
GET_VREG r2, r0 @ r2<- vCC
- ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- vBB/vBB+1
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
and r2, r2, #63 @ r0<- r0 & 0x3f
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
@@ -929,7 +929,7 @@
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r1, r1, lsr r2 @ r1<- r1 >>> r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_ushr_long_2addr():
@@ -944,7 +944,7 @@
CLEAR_SHADOW_PAIR r9, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[A]
and r2, r2, #63 @ r2<- r2 & 0x3f
- ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r9 @ r0/r1<- vAA/vAA+1
mov r0, r0, lsr r2 @ r0<- r2 >> r2
rsb r3, r2, #32 @ r3<- 32 - r2
orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
@@ -953,7 +953,7 @@
movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
mov r1, r1, lsr r2 @ r1<- r1 >>> r2
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA/vAA+1<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_xor_int():
diff --git a/runtime/interpreter/mterp/arm/array.S b/runtime/interpreter/mterp/arm/array.S
index 88d89c53a2..7b3db6165b 100644
--- a/runtime/interpreter/mterp/arm/array.S
+++ b/runtime/interpreter/mterp/arm/array.S
@@ -87,7 +87,7 @@
ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ SET_VREG_WIDE_BY_ADDR r2, r3, r9 @ vAA/vAA+1<- r2/r3
GOTO_OPCODE ip @ jump to next instruction
%def op_aput(store="str", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
@@ -169,7 +169,7 @@
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
- ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r2, r3, r9 @ r2/r3<- vAA/vAA+1
GET_INST_OPCODE ip @ extract opcode from rINST
strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
GOTO_OPCODE ip @ jump to next instruction
diff --git a/runtime/interpreter/mterp/arm/control_flow.S b/runtime/interpreter/mterp/arm/control_flow.S
index 51832e10b5..2299ef9332 100644
--- a/runtime/interpreter/mterp/arm/control_flow.S
+++ b/runtime/interpreter/mterp/arm/control_flow.S
@@ -189,7 +189,7 @@
blne MterpSuspendCheck @ (self)
mov r2, rINST, lsr #8 @ r2<- AA
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[AA]
- ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1 <- vAA/vAA+1
b MterpReturn
%def op_sparse_switch():
diff --git a/runtime/interpreter/mterp/arm/floating_point.S b/runtime/interpreter/mterp/arm/floating_point.S
index 21c386eb6a..9e4d00cb9a 100644
--- a/runtime/interpreter/mterp/arm/floating_point.S
+++ b/runtime/interpreter/mterp/arm/floating_point.S
@@ -13,8 +13,8 @@
and r2, r0, #255 @ r2<- BB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- flds s1, [r3] @ s1<- vCC
- flds s0, [r2] @ s0<- vBB
+ GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vCC
+ GET_VREG_FLOAT_BY_ADDR s0, r2 @ s0<- vBB
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ s2<- op
@@ -35,12 +35,12 @@
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- flds s1, [r3] @ s1<- vB
+ GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vB
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- flds s0, [r9] @ s0<- vA
+ GET_VREG_FLOAT_BY_ADDR s0, r9 @ s0<- vA
$instr @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
- fsts s2, [r9] @ vAA<- s2 No need to clear as it's 2addr
+ SET_VREG_FLOAT_BY_ADDR s2, r9 @ vAA<- s2 No need to clear as it's 2addr
GOTO_OPCODE ip @ jump to next instruction
%def fbinopWide(instr=""):
@@ -58,14 +58,14 @@
and r2, r0, #255 @ r2<- BB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
- fldd d1, [r3] @ d1<- vCC
- fldd d0, [r2] @ d0<- vBB
+ GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vCC
+ GET_VREG_DOUBLE_BY_ADDR d0, r2 @ d0<- vBB
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ s2<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
- fstd d2, [r9] @ vAA<- d2
+ SET_VREG_DOUBLE_BY_ADDR d2, r9 @ vAA<- d2
GOTO_OPCODE ip @ jump to next instruction
%def fbinopWide2addr(instr=""):
@@ -82,13 +82,13 @@
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
- fldd d1, [r3] @ d1<- vB
+ GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- fldd d0, [r9] @ d0<- vA
+ GET_VREG_DOUBLE_BY_ADDR d0, r9 @ d0<- vA
$instr @ d2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
- fstd d2, [r9] @ vAA<- d2
+ SET_VREG_DOUBLE_BY_ADDR d2, r9 @ vAA<- d2
GOTO_OPCODE ip @ jump to next instruction
%def funop(instr=""):
@@ -101,7 +101,7 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
+ GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s1<- op
@@ -119,7 +119,7 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- fldd d0, [r3] @ d0<- vB
+ GET_VREG_DOUBLE_BY_ADDR d0, r3 @ d0<- vB
ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s0<- op
@@ -137,14 +137,14 @@
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
- flds s0, [r3] @ s0<- vB
+ GET_VREG_FLOAT_BY_ADDR s0, r3 @ s0<- vB
ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ d0<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
- fstd d0, [r9] @ vA<- d0
+ SET_VREG_DOUBLE_BY_ADDR d0, r9 @ vA<- d0
GOTO_OPCODE ip @ jump to next instruction
%def op_add_double():
@@ -183,8 +183,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
+ GET_VREG_DOUBLE_BY_ADDR d0, r2 @ d0<- vBB
+ GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vCC
vcmpe.f64 d0, d1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r0, #1 @ r0<- 1 (default)
@@ -219,8 +219,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
+ GET_VREG_FLOAT_BY_ADDR s0, r2 @ s0<- vBB
+ GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vCC
vcmpe.f32 s0, s1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mov r0, #1 @ r0<- 1 (default)
@@ -255,8 +255,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- fldd d0, [r2] @ d0<- vBB
- fldd d1, [r3] @ d1<- vCC
+ GET_VREG_DOUBLE_BY_ADDR d0, r2 @ d0<- vBB
+ GET_VREG_DOUBLE_BY_ADDR d1, r3 @ d1<- vCC
vcmpe.f64 d0, d1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mvn r0, #0 @ r0<- -1 (default)
@@ -291,8 +291,8 @@
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
- flds s0, [r2] @ s0<- vBB
- flds s1, [r3] @ s1<- vCC
+ GET_VREG_FLOAT_BY_ADDR s0, r2 @ s0<- vBB
+ GET_VREG_FLOAT_BY_ADDR s1, r3 @ s1<- vCC
vcmpe.f32 s0, s1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mvn r0, #0 @ r0<- -1 (default)
diff --git a/runtime/interpreter/mterp/arm/main.S b/runtime/interpreter/mterp/arm/main.S
index 6d6b1901ef..4cf65d1930 100644
--- a/runtime/interpreter/mterp/arm/main.S
+++ b/runtime/interpreter/mterp/arm/main.S
@@ -298,6 +298,25 @@ unspecified registers or condition codes.
add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
.endm
+.macro GET_VREG_WIDE_BY_ADDR reg0, reg1, addr
+ ldmia \addr, {\reg0, \reg1}
+.endm
+.macro SET_VREG_WIDE_BY_ADDR reg0, reg1, addr
+ stmia \addr, {\reg0, \reg1}
+.endm
+.macro GET_VREG_FLOAT_BY_ADDR reg, addr
+ flds \reg, [\addr]
+.endm
+.macro SET_VREG_FLOAT_BY_ADDR reg, addr
+ fsts \reg, [\addr]
+.endm
+.macro GET_VREG_DOUBLE_BY_ADDR reg, addr
+ fldd \reg, [\addr]
+.endm
+.macro SET_VREG_DOUBLE_BY_ADDR reg, addr
+ fstd \reg, [\addr]
+.endm
+
/*
* Refresh handler table.
*/
diff --git a/runtime/interpreter/mterp/arm/object.S b/runtime/interpreter/mterp/arm/object.S
index 092aa9ef4e..7736383e95 100644
--- a/runtime/interpreter/mterp/arm/object.S
+++ b/runtime/interpreter/mterp/arm/object.S
@@ -160,7 +160,7 @@
VREG_INDEX_TO_ADDR r3, r2 @ r3<- &fp[A]
CLEAR_SHADOW_PAIR r2, ip, lr @ Zero out the shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ fp[A]<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_instance_of():
@@ -257,7 +257,7 @@
cmp r2, #0 @ check object for null
beq common_errNullObject @ object was null
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[A]
- ldmia r0, {r0-r1} @ r0/r1<- fp[A]/fp[A+1]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r0 @ r0/r1<- fp[A]/fp[A+1]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
strd r0, [r2, r3] @ obj.field<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/arm/other.S b/runtime/interpreter/mterp/arm/other.S
index fcdde1e72a..31b9354530 100644
--- a/runtime/interpreter/mterp/arm/other.S
+++ b/runtime/interpreter/mterp/arm/other.S
@@ -104,7 +104,7 @@
FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r9, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r9 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_const_wide_16():
@@ -116,7 +116,7 @@
CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_const_wide_32():
@@ -130,7 +130,7 @@
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
mov r1, r0, asr #31 @ r1<- ssssssss
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_const_wide_high16():
@@ -143,7 +143,7 @@
CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r3, {r0-r1} @ vAA<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_monitor_enter():
@@ -279,7 +279,7 @@
ldmia r3, {r0-r1} @ r0/r1<- retval.j
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r2 @ fp[AA]<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
@@ -290,11 +290,11 @@
ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[B]
VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[A]
- ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[B]
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r2 @ fp[A]<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_move_wide_16():
@@ -304,10 +304,10 @@
FETCH r2, 1 @ r2<- AAAA
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
VREG_INDEX_TO_ADDR lr, r2 @ r2<- &fp[AAAA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[BBBB]
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r2, r3, ip @ Zero out the shadow regs
- stmia lr, {r0-r1} @ fp[AAAA]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, lr @ fp[AAAA]<- r0/r1
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
@@ -318,11 +318,11 @@
mov rINST, rINST, lsr #8 @ rINST<- AA
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[BBBB]
VREG_INDEX_TO_ADDR r2, rINST @ r2<- &fp[AA]
- ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ GET_VREG_WIDE_BY_ADDR r0, r1, r3 @ r0/r1<- fp[BBBB]
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
- stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ SET_VREG_WIDE_BY_ADDR r0, r1, r2 @ fp[AA]<- r0/r1
GOTO_OPCODE ip @ jump to next instruction
%def op_nop():
diff --git a/runtime/interpreter/mterp/arm64/floating_point.S b/runtime/interpreter/mterp/arm64/floating_point.S
index 04ca6949ff..ad42db3f4b 100644
--- a/runtime/interpreter/mterp/arm64/floating_point.S
+++ b/runtime/interpreter/mterp/arm64/floating_point.S
@@ -1,5 +1,5 @@
%def fbinop(instr=""):
- /*:
+ /*
* Generic 32-bit floating-point operation.
*
* For: add-float, sub-float, mul-float, div-float
@@ -15,7 +15,24 @@
lsr w1, wINST, #8 // r1<- AA
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w1
+ SET_VREG_FLOAT s0, w1
+ GOTO_OPCODE ip // jump to next instruction
+
+%def fbinopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2"):
+ /*
+ * Generic 64-bit floating-point operation.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH w0, 1 // w0<- CCBB
+ lsr w4, wINST, #8 // w4<- AA
+ lsr w2, w0, #8 // w2<- CC
+ and w1, w0, #255 // w1<- BB
+ GET_VREG_DOUBLE $r2, w2 // $r2<- vCC
+ GET_VREG_DOUBLE $r1, w1 // $r1<- vBB
+ FETCH_ADVANCE_INST 2 // advance rPC, load rINST
+ $instr // $result<- op, w0-w4 changed
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_DOUBLE $result, w4 // vAA<- $result
GOTO_OPCODE ip // jump to next instruction
%def fbinop2addr(instr=""):
@@ -34,7 +51,22 @@
$instr // s2<- op
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s2, w9
+ SET_VREG_FLOAT s2, w9
+ GOTO_OPCODE ip // jump to next instruction
+
+%def fbinopWide2addr(instr="fadd d0, d0, d1", r0="d0", r1="d1"):
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation.
+ */
+ /* binop/2addr vA, vB */
+ lsr w1, wINST, #12 // w1<- B
+ ubfx w2, wINST, #8, #4 // w2<- A
+ GET_VREG_DOUBLE $r1, w1 // $r1<- vB
+ GET_VREG_DOUBLE $r0, w2 // $r0<- vA
+ FETCH_ADVANCE_INST 1 // advance rPC, load rINST
+ $instr // result<- op
+ GET_INST_OPCODE ip // extract opcode from rINST
+ SET_VREG_DOUBLE $r0, w2 // vA<- result
GOTO_OPCODE ip // jump to next instruction
%def fcmp(wide="", r1="s1", r2="s2", cond="lt"):
@@ -47,8 +79,13 @@
lsr w4, wINST, #8 // w4<- AA
and w2, w0, #255 // w2<- BB
lsr w3, w0, #8 // w3<- CC
- GET_VREG$wide $r1, w2
- GET_VREG$wide $r2, w3
+% if r1.startswith("d"):
+ GET_VREG_DOUBLE $r1, w2
+ GET_VREG_DOUBLE $r2, w3
+% else:
+ GET_VREG $r1, w2
+ GET_VREG $r2, w3
+% #endif
fcmp $r1, $r2
cset w0, ne
cneg w0, w0, $cond
@@ -72,7 +109,7 @@
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
$instr // d0<- op
GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG $tgtreg, w4 // vA<- d0
+ SET_VREG_FLOAT $tgtreg, w4 // vA<- $tgtreg
GOTO_OPCODE ip // jump to next instruction
%def funopNarrower(srcreg="s0", tgtreg="d0", instr=""):
@@ -85,11 +122,15 @@
/* unop vA, vB */
lsr w3, wINST, #12 // w3<- B
ubfx w4, wINST, #8, #4 // w4<- A
+% if srcreg.startswith("d"):
+ GET_VREG_DOUBLE $srcreg, w3
+% else:
GET_VREG_WIDE $srcreg, w3
+% #endif
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
$instr // d0<- op
GET_INST_OPCODE ip // extract opcode from wINST
- SET_VREG $tgtreg, w4 // vA<- d0
+ SET_VREG_FLOAT $tgtreg, w4 // vA<- $tgtreg
GOTO_OPCODE ip // jump to next instruction
%def funopWide(srcreg="s0", tgtreg="d0", instr=""):
@@ -102,11 +143,19 @@
/* unop vA, vB */
lsr w3, wINST, #12 // w3<- B
ubfx w4, wINST, #8, #4 // w4<- A
+% if srcreg.startswith("d"):
+ GET_VREG_DOUBLE $srcreg, w3
+% else:
GET_VREG_WIDE $srcreg, w3
+% #endif
FETCH_ADVANCE_INST 1 // advance rPC, load wINST
$instr // d0<- op
GET_INST_OPCODE ip // extract opcode from wINST
+% if tgtreg.startswith("d"):
+ SET_VREG_DOUBLE $tgtreg, w4 // vA<- d0
+% else:
SET_VREG_WIDE $tgtreg, w4 // vA<- d0
+% #endif
GOTO_OPCODE ip // jump to next instruction
%def funopWider(srcreg="s0", tgtreg="d0", instr=""):
@@ -127,10 +176,10 @@
GOTO_OPCODE ip // jump to next instruction
%def op_add_double():
-% binopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2")
+% fbinopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2")
%def op_add_double_2addr():
-% binopWide2addr(instr="fadd d0, d0, d1", r0="d0", r1="d1")
+% fbinopWide2addr(instr="fadd d0, d0, d1", r0="d0", r1="d1")
%def op_add_float():
% fbinop(instr="fadd s0, s0, s1")
@@ -151,10 +200,10 @@
% fcmp(wide="", r1="s1", r2="s2", cond="lt")
%def op_div_double():
-% binopWide(instr="fdiv d0, d1, d2", result="d0", r1="d1", r2="d2")
+% fbinopWide(instr="fdiv d0, d1, d2", result="d0", r1="d1", r2="d2")
%def op_div_double_2addr():
-% binopWide2addr(instr="fdiv d0, d0, d1", r0="d0", r1="d1")
+% fbinopWide2addr(instr="fdiv d0, d0, d1", r0="d0", r1="d1")
%def op_div_float():
% fbinop(instr="fdiv s0, s0, s1")
@@ -193,10 +242,10 @@
% funopNarrower(instr="scvtf s0, x0", srcreg="x0", tgtreg="s0")
%def op_mul_double():
-% binopWide(instr="fmul d0, d1, d2", result="d0", r1="d1", r2="d2")
+% fbinopWide(instr="fmul d0, d1, d2", result="d0", r1="d1", r2="d2")
%def op_mul_double_2addr():
-% binopWide2addr(instr="fmul d0, d0, d1", r0="d0", r1="d1")
+% fbinopWide2addr(instr="fmul d0, d0, d1", r0="d0", r1="d1")
%def op_mul_float():
% fbinop(instr="fmul s0, s0, s1")
@@ -215,8 +264,8 @@
FETCH w0, 1 // w0<- CCBB
lsr w2, w0, #8 // w2<- CC
and w1, w0, #255 // w1<- BB
- GET_VREG_WIDE d1, w2 // d1<- vCC
- GET_VREG_WIDE d0, w1 // d0<- vBB
+ GET_VREG_DOUBLE d1, w2 // d1<- vCC
+ GET_VREG_DOUBLE d0, w1 // d0<- vBB
bl fmod
lsr w4, wINST, #8 // w4<- AA
FETCH_ADVANCE_INST 2 // advance rPC, load rINST
@@ -229,8 +278,8 @@
/* rem vA, vB */
lsr w1, wINST, #12 // w1<- B
ubfx w2, wINST, #8, #4 // w2<- A
- GET_VREG_WIDE d1, w1 // d1<- vB
- GET_VREG_WIDE d0, w2 // d0<- vA
+ GET_VREG_DOUBLE d1, w1 // d1<- vB
+ GET_VREG_DOUBLE d0, w2 // d0<- vA
bl fmod
ubfx w2, wINST, #8, #4 // w2<- A (need to reload - killed across call)
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
@@ -253,14 +302,14 @@
ubfx w9, wINST, #8, #4 // w9<- A
FETCH_ADVANCE_INST 1 // advance rPC, load rINST
GET_INST_OPCODE ip // extract opcode from rINST
- SET_VREG s0, w9
+ SET_VREG_FLOAT s0, w9
GOTO_OPCODE ip // jump to next instruction
%def op_sub_double():
-% binopWide(instr="fsub d0, d1, d2", result="d0", r1="d1", r2="d2")
+% fbinopWide(instr="fsub d0, d1, d2", result="d0", r1="d1", r2="d2")
%def op_sub_double_2addr():
-% binopWide2addr(instr="fsub d0, d0, d1", r0="d0", r1="d1")
+% fbinopWide2addr(instr="fsub d0, d0, d1", r0="d0", r1="d1")
%def op_sub_float():
% fbinop(instr="fsub s0, s0, s1")
diff --git a/runtime/interpreter/mterp/arm64/main.S b/runtime/interpreter/mterp/arm64/main.S
index 858cb38697..0cfbbff3aa 100644
--- a/runtime/interpreter/mterp/arm64/main.S
+++ b/runtime/interpreter/mterp/arm64/main.S
@@ -259,10 +259,13 @@ codes.
str \reg, [xFP, \vreg, uxtw #2]
str \reg, [xREFS, \vreg, uxtw #2]
.endm
+.macro SET_VREG_FLOAT reg, vreg
+ str \reg, [xFP, \vreg, uxtw #2]
+ str wzr, [xREFS, \vreg, uxtw #2]
+.endm
/*
* Get/set the 64-bit value from a Dalvik register.
- * TUNING: can we do better here?
*/
.macro GET_VREG_WIDE reg, vreg
add ip2, xFP, \vreg, lsl #2
@@ -274,6 +277,16 @@ codes.
add ip2, xREFS, \vreg, lsl #2
str xzr, [ip2]
.endm
+.macro GET_VREG_DOUBLE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ ldr \reg, [ip2]
+.endm
+.macro SET_VREG_DOUBLE reg, vreg
+ add ip2, xFP, \vreg, lsl #2
+ str \reg, [ip2]
+ add ip2, xREFS, \vreg, lsl #2
+ str xzr, [ip2]
+.endm
/*
* Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
diff --git a/runtime/interpreter/mterp/x86/arithmetic.S b/runtime/interpreter/mterp/x86/arithmetic.S
index 3b5f0beb89..973e5b8a0f 100644
--- a/runtime/interpreter/mterp/x86/arithmetic.S
+++ b/runtime/interpreter/mterp/x86/arithmetic.S
@@ -153,7 +153,7 @@
movzbl 2(rPC), %eax # eax <- BB
movzbl 3(rPC), %ecx # ecx <- CC
GET_VREG %eax, %eax # eax <- vBB
- $instr # ex: addl VREG_ADDRESS(%ecx),%eax
+ $instr VREG_ADDRESS(%ecx), %eax
SET_VREG $result, rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -187,7 +187,7 @@
sarl $$4, rINST # rINST <- B
GET_VREG %eax, rINST # eax <- vB
andb $$0xf, %cl # ecx <- A
- $instr # for ex: addl %eax,VREG_ADDRESS(%ecx)
+ $instr %eax, VREG_ADDRESS(%ecx)
CLEAR_REF %ecx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -240,8 +240,8 @@
movl rIBASE, LOCAL0(%esp) # save rIBASE
GET_VREG rIBASE, %eax # rIBASE <- v[BB+0]
GET_VREG_HIGH %eax, %eax # eax <- v[BB+1]
- $instr1 # ex: addl VREG_ADDRESS(%ecx),rIBASE
- $instr2 # ex: adcl VREG_HIGH_ADDRESS(%ecx),%eax
+ $instr1 VREG_ADDRESS(%ecx), rIBASE
+ $instr2 VREG_HIGH_ADDRESS(%ecx), %eax
SET_VREG rIBASE, rINST # v[AA+0] <- rIBASE
movl LOCAL0(%esp), rIBASE # restore rIBASE
SET_VREG_HIGH %eax, rINST # v[AA+1] <- eax
@@ -257,8 +257,8 @@
GET_VREG %eax, %ecx # eax<- v[B+0]
GET_VREG_HIGH %ecx, %ecx # eax<- v[B+1]
andb $$0xF, rINSTbl # rINST<- A
- $instr1 # ex: addl %eax,(rFP,rINST,4)
- $instr2 # ex: adcl %ecx,4(rFP,rINST,4)
+ $instr1 %eax, VREG_ADDRESS(rINST)
+ $instr2 %ecx, VREG_HIGH_ADDRESS(rINST)
CLEAR_WIDE_REF rINST
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -355,10 +355,10 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_add_int():
-% binop(instr="addl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="addl")
%def op_add_int_2addr():
-% binop2addr(instr="addl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="addl")
%def op_add_int_lit16():
% binopLit16(instr="addl %ecx, %eax")
@@ -367,16 +367,16 @@
% binopLit8(instr="addl %ecx, %eax")
%def op_add_long():
-% binopWide(instr1="addl VREG_ADDRESS(%ecx), rIBASE", instr2="adcl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="addl", instr2="adcl")
%def op_add_long_2addr():
-% binopWide2addr(instr1="addl %eax, (rFP,rINST,4)", instr2="adcl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="addl", instr2="adcl")
%def op_and_int():
-% binop(instr="andl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="andl")
%def op_and_int_2addr():
-% binop2addr(instr="andl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="andl")
%def op_and_int_lit16():
% binopLit16(instr="andl %ecx, %eax")
@@ -385,10 +385,10 @@
% binopLit8(instr="andl %ecx, %eax")
%def op_and_long():
-% binopWide(instr1="andl VREG_ADDRESS(%ecx), rIBASE", instr2="andl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="andl", instr2="andl")
%def op_and_long_2addr():
-% binopWide2addr(instr1="andl %eax, (rFP,rINST,4)", instr2="andl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="andl", instr2="andl")
%def op_cmp_long():
/*
@@ -666,10 +666,10 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_or_int():
-% binop(instr="orl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="orl")
%def op_or_int_2addr():
-% binop2addr(instr="orl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="orl")
%def op_or_int_lit16():
% binopLit16(instr="orl %ecx, %eax")
@@ -678,10 +678,10 @@
% binopLit8(instr="orl %ecx, %eax")
%def op_or_long():
-% binopWide(instr1="orl VREG_ADDRESS(%ecx), rIBASE", instr2="orl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="orl", instr2="orl")
%def op_or_long_2addr():
-% binopWide2addr(instr1="orl %eax, (rFP,rINST,4)", instr2="orl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="orl", instr2="orl")
%def op_rem_int():
% bindiv(result="rIBASE", special="$0", rem="1")
@@ -845,16 +845,16 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_sub_int():
-% binop(instr="subl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="subl")
%def op_sub_int_2addr():
-% binop2addr(instr="subl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="subl")
%def op_sub_long():
-% binopWide(instr1="subl VREG_ADDRESS(%ecx), rIBASE", instr2="sbbl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="subl", instr2="sbbl")
%def op_sub_long_2addr():
-% binopWide2addr(instr1="subl %eax, (rFP,rINST,4)", instr2="sbbl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="subl", instr2="sbbl")
%def op_ushr_int():
% binop1(instr="shrl %cl, %eax")
@@ -925,10 +925,10 @@
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_xor_int():
-% binop(instr="xorl VREG_ADDRESS(%ecx), %eax")
+% binop(instr="xorl")
%def op_xor_int_2addr():
-% binop2addr(instr="xorl %eax, VREG_ADDRESS(%ecx)")
+% binop2addr(instr="xorl")
%def op_xor_int_lit16():
% binopLit16(instr="xorl %ecx, %eax")
@@ -937,7 +937,7 @@
% binopLit8(instr="xorl %ecx, %eax")
%def op_xor_long():
-% binopWide(instr1="xorl VREG_ADDRESS(%ecx), rIBASE", instr2="xorl VREG_HIGH_ADDRESS(%ecx), %eax")
+% binopWide(instr1="xorl", instr2="xorl")
%def op_xor_long_2addr():
-% binopWide2addr(instr1="xorl %eax, (rFP,rINST,4)", instr2="xorl %ecx, 4(rFP,rINST,4)")
+% binopWide2addr(instr1="xorl", instr2="xorl")
diff --git a/runtime/interpreter/mterp/x86/floating_point.S b/runtime/interpreter/mterp/x86/floating_point.S
index 3de1fc8f19..bc7c59dc69 100644
--- a/runtime/interpreter/mterp/x86/floating_point.S
+++ b/runtime/interpreter/mterp/x86/floating_point.S
@@ -18,7 +18,7 @@
/* op vAA, vBB, vCC */
movzbl 3(rPC), %ecx # ecx<- CC
movzbl 2(rPC), %eax # eax<- BB
- movs${suff} VREG_ADDRESS(%eax), %xmm0
+ GET_VREG_XMM${suff} %xmm0, %eax
xor %eax, %eax
ucomis${suff} VREG_ADDRESS(%ecx), %xmm0
jp .L${opcode}_nan_is_${nanval}
@@ -55,9 +55,9 @@
%def sseBinop(instr="", suff=""):
movzbl 2(rPC), %ecx # ecx <- BB
movzbl 3(rPC), %eax # eax <- CC
- movs${suff} VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
+ GET_VREG_XMM${suff} %xmm0, %ecx # %xmm0 <- 1st src
${instr}${suff} VREG_ADDRESS(%eax), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(rINST) # vAA <- %xmm0
+ SET_VREG_XMM${suff} %xmm0, rINST # vAA <- %xmm0
pxor %xmm0, %xmm0
movs${suff} %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -65,10 +65,10 @@
%def sseBinop2Addr(instr="", suff=""):
movzx rINSTbl, %ecx # ecx <- A+
andl $$0xf, %ecx # ecx <- A
- movs${suff} VREG_ADDRESS(%ecx), %xmm0 # %xmm0 <- 1st src
+ GET_VREG_XMM${suff} %xmm0, %ecx # %xmm0 <- 1st src
sarl $$4, rINST # rINST<- B
${instr}${suff} VREG_ADDRESS(rINST), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(%ecx) # vAA<- %xmm0
+ SET_VREG_XMM${suff} %xmm0, %ecx # vAA<- %xmm0
pxor %xmm0, %xmm0
movs${suff} %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86/main.S b/runtime/interpreter/mterp/x86/main.S
index 0621fb468a..6eaea6f260 100644
--- a/runtime/interpreter/mterp/x86/main.S
+++ b/runtime/interpreter/mterp/x86/main.S
@@ -318,6 +318,19 @@ unspecified registers or condition codes.
movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm
+.macro GET_VREG_XMMs _xmmreg _vreg
+ movss VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro GET_VREG_XMMd _xmmreg _vreg
+ movsd VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro SET_VREG_XMMs _xmmreg _vreg
+ movss \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+.macro SET_VREG_XMMd _xmmreg _vreg
+ movsd \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+
/*
* function support macros.
*/
diff --git a/runtime/interpreter/mterp/x86_64/arithmetic.S b/runtime/interpreter/mterp/x86_64/arithmetic.S
index 263f82b9f6..ff64b530b5 100644
--- a/runtime/interpreter/mterp/x86_64/arithmetic.S
+++ b/runtime/interpreter/mterp/x86_64/arithmetic.S
@@ -137,7 +137,7 @@
movzbq 2(rPC), %rax # rax <- BB
movzbq 3(rPC), %rcx # rcx <- CC
GET_VREG %eax, %rax # eax <- vBB
- $instr # ex: addl VREG_ADDRESS(%rcx),%eax
+ $instr VREG_ADDRESS(%rcx),%eax
SET_VREG $result, rINSTq
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -177,7 +177,7 @@
sarl $$4, rINST # rINST <- B
andb $$0xf, %cl # ecx <- A
GET_VREG %eax, rINSTq # eax <- vB
- $instr # for ex: addl %eax,(rFP,%ecx,4)
+ $instr %eax, VREG_ADDRESS(%rcx)
CLEAR_REF %rcx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -228,7 +228,7 @@
movzbq 2(rPC), %rax # eax <- BB
movzbq 3(rPC), %rcx # ecx <- CC
GET_WIDE_VREG %rax, %rax # rax <- v[BB]
- $instr # ex: addq VREG_ADDRESS(%rcx),%rax
+ $instr VREG_ADDRESS(%rcx),%rax
SET_WIDE_VREG %rax, rINSTq # v[AA] <- rax
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -241,7 +241,7 @@
sarl $$4, rINST # rINST <- B
andb $$0xf, %cl # ecx <- A
GET_WIDE_VREG %rax, rINSTq # rax <- vB
- $instr # for ex: addq %rax,VREG_ADDRESS(%rcx)
+ $instr %rax,VREG_ADDRESS(%rcx)
CLEAR_WIDE_REF %rcx
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -255,7 +255,7 @@
movl rINST, %ecx # rcx <- A+
sarl $$4, rINST # rINST <- B
andb $$0xf, %cl # ecx <- A
- movs${fp_suffix} VREG_ADDRESS(rINSTq), %xmm0
+ GET_VREG_XMM${fp_suffix} %xmm0, rINSTq
mov${i_suffix} ${max_const}, ${result_reg}
cvtsi2s${fp_suffix}${i_suffix} ${result_reg}, %xmm1
comis${fp_suffix} %xmm1, %xmm0
@@ -317,10 +317,10 @@ $instr
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
%def op_add_int():
-% binop(instr="addl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="addl")
%def op_add_int_2addr():
-% binop2addr(instr="addl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="addl")
%def op_add_int_lit16():
% binopLit16(instr="addl %ecx, %eax")
@@ -329,16 +329,16 @@ $instr
% binopLit8(instr="addl %ecx, %eax")
%def op_add_long():
-% binopWide(instr="addq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="addq")
%def op_add_long_2addr():
-% binopWide2addr(instr="addq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="addq")
%def op_and_int():
-% binop(instr="andl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="andl")
%def op_and_int_2addr():
-% binop2addr(instr="andl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="andl")
%def op_and_int_lit16():
% binopLit16(instr="andl %ecx, %eax")
@@ -347,10 +347,10 @@ $instr
% binopLit8(instr="andl %ecx, %eax")
%def op_and_long():
-% binopWide(instr="andq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="andq")
%def op_and_long_2addr():
-% binopWide2addr(instr="andq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="andq")
%def op_cmp_long():
/*
@@ -413,7 +413,7 @@ $instr
% op_move()
%def op_mul_int():
-% binop(instr="imull VREG_ADDRESS(%rcx), %eax")
+% binop(instr="imull")
%def op_mul_int_2addr():
/* mul vA, vB */
@@ -432,7 +432,7 @@ $instr
% binopLit8(instr="imull %ecx, %eax")
%def op_mul_long():
-% binopWide(instr="imulq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="imulq")
%def op_mul_long_2addr():
/* mul vA, vB */
@@ -457,10 +457,10 @@ $instr
% unop(instr=" notq %rax", wide="1")
%def op_or_int():
-% binop(instr="orl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="orl")
%def op_or_int_2addr():
-% binop2addr(instr="orl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="orl")
%def op_or_int_lit16():
% binopLit16(instr="orl %ecx, %eax")
@@ -469,10 +469,10 @@ $instr
% binopLit8(instr="orl %ecx, %eax")
%def op_or_long():
-% binopWide(instr="orq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="orq")
%def op_or_long_2addr():
-% binopWide2addr(instr="orq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="orq")
%def op_rem_int():
% bindiv(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")
@@ -530,16 +530,16 @@ $instr
% shop2addr(instr="sarq %cl, %rax", wide="1")
%def op_sub_int():
-% binop(instr="subl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="subl")
%def op_sub_int_2addr():
-% binop2addr(instr="subl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="subl")
%def op_sub_long():
-% binopWide(instr="subq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="subq")
%def op_sub_long_2addr():
-% binopWide2addr(instr="subq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="subq")
%def op_ushr_int():
% binop1(instr="shrl %cl, %eax")
@@ -557,10 +557,10 @@ $instr
% shop2addr(instr="shrq %cl, %rax", wide="1")
%def op_xor_int():
-% binop(instr="xorl VREG_ADDRESS(%rcx), %eax")
+% binop(instr="xorl")
%def op_xor_int_2addr():
-% binop2addr(instr="xorl %eax, VREG_ADDRESS(%rcx)")
+% binop2addr(instr="xorl")
%def op_xor_int_lit16():
% binopLit16(instr="xorl %ecx, %eax")
@@ -569,7 +569,7 @@ $instr
% binopLit8(instr="xorl %ecx, %eax")
%def op_xor_long():
-% binopWide(instr="xorq VREG_ADDRESS(%rcx), %rax")
+% binopWide(instr="xorq")
%def op_xor_long_2addr():
-% binopWide2addr(instr="xorq %rax, VREG_ADDRESS(%rcx)")
+% binopWide2addr(instr="xorq")
diff --git a/runtime/interpreter/mterp/x86_64/floating_point.S b/runtime/interpreter/mterp/x86_64/floating_point.S
index b40c0e632e..7fcb7424b6 100644
--- a/runtime/interpreter/mterp/x86_64/floating_point.S
+++ b/runtime/interpreter/mterp/x86_64/floating_point.S
@@ -18,7 +18,7 @@
/* op vAA, vBB, vCC */
movzbq 3(rPC), %rcx # ecx<- CC
movzbq 2(rPC), %rax # eax<- BB
- movs${suff} VREG_ADDRESS(%rax), %xmm0
+ GET_VREG_XMM${suff} %xmm0, %rax
xor %eax, %eax
ucomis${suff} VREG_ADDRESS(%rcx), %xmm0
jp .L${opcode}_nan_is_${nanval}
@@ -44,10 +44,10 @@
andb $$0xf, %cl # ecx <- A
cvts${source_suffix}2s${dest_suffix} VREG_ADDRESS(rINSTq), %xmm0
.if $wide
- movsd %xmm0, VREG_ADDRESS(%rcx)
+ SET_VREG_XMMd %xmm0, %rcx
CLEAR_WIDE_REF %rcx
.else
- movss %xmm0, VREG_ADDRESS(%rcx)
+ SET_VREG_XMMs %xmm0, %rcx
CLEAR_REF %rcx
.endif
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
@@ -55,9 +55,9 @@
%def sseBinop(instr="", suff=""):
movzbq 2(rPC), %rcx # ecx <- BB
movzbq 3(rPC), %rax # eax <- CC
- movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ GET_VREG_XMM${suff} %xmm0, %rcx # %xmm0 <- 1st src
${instr}${suff} VREG_ADDRESS(%rax), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(rINSTq) # vAA <- %xmm0
+ SET_VREG_XMM${suff} %xmm0, rINSTq # vAA <- %xmm0
pxor %xmm0, %xmm0
movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
@@ -65,10 +65,10 @@
%def sseBinop2Addr(instr="", suff=""):
movl rINST, %ecx # ecx <- A+
andl $$0xf, %ecx # ecx <- A
- movs${suff} VREG_ADDRESS(%rcx), %xmm0 # %xmm0 <- 1st src
+ GET_VREG_XMM${suff} %xmm0, %rcx # %xmm0 <- 1st src
sarl $$4, rINST # rINST<- B
${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0
- movs${suff} %xmm0, VREG_ADDRESS(%rcx) # vAA<- %xmm0
+ SET_VREG_XMM${suff} %xmm0, %rcx # vAA <- %xmm0
pxor %xmm0, %xmm0
movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
diff --git a/runtime/interpreter/mterp/x86_64/main.S b/runtime/interpreter/mterp/x86_64/main.S
index 4609067a5a..5900220750 100644
--- a/runtime/interpreter/mterp/x86_64/main.S
+++ b/runtime/interpreter/mterp/x86_64/main.S
@@ -306,6 +306,19 @@ unspecified registers or condition codes.
movl MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm
+.macro GET_VREG_XMMs _xmmreg _vreg
+ movss VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro GET_VREG_XMMd _xmmreg _vreg
+ movsd VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro SET_VREG_XMMs _xmmreg _vreg
+ movss \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+.macro SET_VREG_XMMd _xmmreg _vreg
+ movsd \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+
/*
* function support macros.
*/
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index d67d9dced8..4a3ef07819 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -56,10 +56,12 @@ static constexpr size_t kJitSlowStressDefaultCompileThreshold = 2; // Slow-
// JIT compiler
void* Jit::jit_library_handle_ = nullptr;
void* Jit::jit_compiler_handle_ = nullptr;
-void* (*Jit::jit_load_)(bool*) = nullptr;
+void* (*Jit::jit_load_)(void) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
+bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
+void (*Jit::jit_update_options_)(void*) = nullptr;
struct StressModeHelper {
DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
@@ -179,20 +181,21 @@ Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
LOG(WARNING) << "Not creating JIT: library not loaded";
return nullptr;
}
- bool will_generate_debug_symbols = false;
- jit_compiler_handle_ = (jit_load_)(&will_generate_debug_symbols);
+ jit_compiler_handle_ = (jit_load_)();
if (jit_compiler_handle_ == nullptr) {
LOG(WARNING) << "Not creating JIT: failed to allocate a compiler";
return nullptr;
}
std::unique_ptr<Jit> jit(new Jit(code_cache, options));
- jit->generate_debug_info_ = will_generate_debug_symbols;
+ // If the code collector is enabled, check if that still holds:
// With 'perf', we want a 1-1 mapping between an address and a method.
// We aren't able to keep method pointers live during the instrumentation method entry trampoline
// so we will just disable jit-gc if we are doing that.
- code_cache->SetGarbageCollectCode(!jit->generate_debug_info_ &&
- !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+ if (code_cache->GetGarbageCollectCode()) {
+ code_cache->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
+ !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+ }
VLOG(jit) << "JIT created with initial_capacity="
<< PrettySize(options->GetCodeCacheInitialCapacity())
@@ -200,13 +203,21 @@ Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
<< ", compile_threshold=" << options->GetCompileThreshold()
<< ", profile_saver_options=" << options->GetProfileSaverOptions();
- jit->CreateThreadPool();
-
// Notify native debugger about the classes already loaded before the creation of the jit.
jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
return jit.release();
}
+template <typename T>
+bool Jit::LoadSymbol(T* address, const char* name, std::string* error_msg) {
+ *address = reinterpret_cast<T>(dlsym(jit_library_handle_, name));
+ if (*address == nullptr) {
+ *error_msg = std::string("JIT couldn't find ") + name + std::string(" entry point");
+ return false;
+ }
+ return true;
+}
+
bool Jit::LoadCompilerLibrary(std::string* error_msg) {
jit_library_handle_ = dlopen(
kIsDebugBuild ? "libartd-compiler.so" : "libart-compiler.so", RTLD_NOW);
@@ -216,31 +227,16 @@ bool Jit::LoadCompilerLibrary(std::string* error_msg) {
*error_msg = oss.str();
return false;
}
- jit_load_ = reinterpret_cast<void* (*)(bool*)>(dlsym(jit_library_handle_, "jit_load"));
- if (jit_load_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_load entry point";
- return false;
- }
- jit_unload_ = reinterpret_cast<void (*)(void*)>(
- dlsym(jit_library_handle_, "jit_unload"));
- if (jit_unload_ == nullptr) {
+ bool all_resolved = true;
+ all_resolved = all_resolved && LoadSymbol(&jit_load_, "jit_load", error_msg);
+ all_resolved = all_resolved && LoadSymbol(&jit_unload_, "jit_unload", error_msg);
+ all_resolved = all_resolved && LoadSymbol(&jit_compile_method_, "jit_compile_method", error_msg);
+ all_resolved = all_resolved && LoadSymbol(&jit_types_loaded_, "jit_types_loaded", error_msg);
+ all_resolved = all_resolved && LoadSymbol(&jit_update_options_, "jit_update_options", error_msg);
+ all_resolved = all_resolved &&
+ LoadSymbol(&jit_generate_debug_info_, "jit_generate_debug_info", error_msg);
+ if (!all_resolved) {
dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_unload entry point";
- return false;
- }
- jit_compile_method_ = reinterpret_cast<bool (*)(void*, ArtMethod*, Thread*, bool)>(
- dlsym(jit_library_handle_, "jit_compile_method"));
- if (jit_compile_method_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_compile_method entry point";
- return false;
- }
- jit_types_loaded_ = reinterpret_cast<void (*)(void*, mirror::Class**, size_t)>(
- dlsym(jit_library_handle_, "jit_types_loaded"));
- if (jit_types_loaded_ == nullptr) {
- dlclose(jit_library_handle_);
- *error_msg = "JIT couldn't find jit_types_loaded entry point";
return false;
}
return true;
@@ -296,7 +292,11 @@ bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
}
void Jit::CreateThreadPool() {
- // There is a DCHECK in the 'AddSamples' method to ensure the tread pool
+ if (Runtime::Current()->IsSafeMode()) {
+ // Never create the pool in safe mode.
+ return;
+ }
+ // There is a DCHECK in the 'AddSamples' method to ensure the thread pool
// is not null when we instrument.
// We need peers as we may report the JIT thread, e.g., in the debugger.
@@ -375,7 +375,7 @@ void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
return;
}
jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit->generate_debug_info_) {
+ if (jit_generate_debug_info_(jit->jit_compiler_handle_)) {
DCHECK(jit->jit_types_loaded_ != nullptr);
jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
}
@@ -390,7 +390,7 @@ void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
std::vector<mirror::Class*> classes_;
};
- if (generate_debug_info_) {
+ if (jit_generate_debug_info_(jit_compiler_handle_)) {
ScopedObjectAccess so(Thread::Current());
CollectClasses visitor;
@@ -630,8 +630,11 @@ static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mut
void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_backedges) {
if (thread_pool_ == nullptr) {
- // Should only see this when shutting down.
- DCHECK(Runtime::Current()->IsShuttingDown(self));
+ // Should only see this when shutting down, starting up, or in zygote, which doesn't
+ // have a thread pool.
+ DCHECK(Runtime::Current()->IsShuttingDown(self) ||
+ !Runtime::Current()->IsFinishedStarting() ||
+ Runtime::Current()->IsZygote());
return;
}
if (IgnoreSamplesForMethod(method)) {
@@ -795,5 +798,15 @@ ScopedJitSuspend::~ScopedJitSuspend() {
}
}
+void Jit::PostForkChildAction() {
+ // At this point, the compiler options have been adjusted to the particular configuration
+ // of the forked child. Parse them again.
+ jit_update_options_(jit_compiler_handle_);
+
+ // Adjust the status of code cache collection: the status from zygote was to not collect.
+ code_cache_->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
+ !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
+}
+
} // namespace jit
} // namespace art
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 46b0762629..e12b032feb 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -100,10 +100,6 @@ class JitOptions {
return use_jit_compilation_;
}
- bool RWXMemoryAllowed() const {
- return rwx_memory_allowed_;
- }
-
void SetUseJitCompilation(bool b) {
use_jit_compilation_ = b;
}
@@ -125,10 +121,6 @@ class JitOptions {
compile_threshold_ = 0;
}
- void SetRWXMemoryAllowed(bool rwx_allowed) {
- rwx_memory_allowed_ = rwx_allowed;
- }
-
private:
bool use_jit_compilation_;
size_t code_cache_initial_capacity_;
@@ -140,7 +132,6 @@ class JitOptions {
uint16_t invoke_transition_weight_;
bool dump_info_on_shutdown_;
int thread_pool_pthread_priority_;
- bool rwx_memory_allowed_;
ProfileSaverOptions profile_saver_options_;
JitOptions()
@@ -153,8 +144,7 @@ class JitOptions {
priority_thread_weight_(0),
invoke_transition_weight_(0),
dump_info_on_shutdown_(false),
- thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority),
- rwx_memory_allowed_(true) {}
+ thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority) {}
DISALLOW_COPY_AND_ASSIGN(JitOptions);
};
@@ -295,6 +285,9 @@ class Jit {
// Start JIT threads.
void Start();
+ // Transition to a zygote child state.
+ void PostForkChildAction();
+
private:
Jit(JitCodeCache* code_cache, JitOptions* options);
@@ -303,13 +296,13 @@ class Jit {
// JIT compiler
static void* jit_library_handle_;
static void* jit_compiler_handle_;
- static void* (*jit_load_)(bool*);
+ static void* (*jit_load_)(void);
static void (*jit_unload_)(void*);
static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool);
static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
-
- // Whether we should generate debug info when compiling.
- bool generate_debug_info_;
+ static void (*jit_update_options_)(void*);
+ static bool (*jit_generate_debug_info_)(void*);
+ template <typename T> static bool LoadSymbol(T*, const char* symbol, std::string* error_msg);
// JIT resources owned by runtime.
jit::JitCodeCache* const code_cache_;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1701ca8a78..97887ccbc9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -18,7 +18,8 @@
#include <sstream>
-#include "android-base/unique_fd.h"
+#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
#include "arch/context.h"
#include "art_method-inl.h"
@@ -64,6 +65,11 @@ namespace jit {
static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
+// Data cache will be half of the capacity
+// Code cache will be the other half of the capacity.
+// TODO: Make this variable?
+static constexpr size_t kCodeAndDataCapacityDivider = 2;
+
static constexpr int kProtR = PROT_READ;
static constexpr int kProtRW = PROT_READ | PROT_WRITE;
static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
@@ -183,69 +189,45 @@ class JitCodeCache::JniStubData {
std::vector<ArtMethod*> methods_;
};
-JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
- size_t max_capacity,
- bool used_only_for_profile_data,
- bool rwx_memory_allowed,
- std::string* error_msg) {
+bool JitCodeCache::InitializeMappings(bool rwx_memory_allowed,
+ bool is_zygote,
+ std::string* error_msg) {
ScopedTrace trace(__PRETTY_FUNCTION__);
- CHECK_GE(max_capacity, initial_capacity);
- // We need to have 32 bit offsets from method headers in code cache which point to things
- // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
- // Ensure we're below 1 GB to be safe.
- if (max_capacity > 1 * GB) {
- std::ostringstream oss;
- oss << "Maxium code cache capacity is limited to 1 GB, "
- << PrettySize(max_capacity) << " is too big";
- *error_msg = oss.str();
- return nullptr;
- }
-
- // Register for membarrier expedited sync core if JIT will be generating code.
- if (!used_only_for_profile_data) {
- if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
- // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
- // flushed and it's used when adding code to the JIT. The memory used by the new code may
- // have just been released and, in theory, the old code could still be in a pipeline.
- VLOG(jit) << "Kernel does not support membarrier sync-core";
- }
- }
+ const size_t capacity = max_capacity_;
+ const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
+ const size_t exec_capacity = capacity - data_capacity;
// File descriptor enabling dual-view mapping of code section.
unique_fd mem_fd;
- // Bionic supports memfd_create, but the call may fail on older kernels.
- mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
- if (mem_fd.get() < 0) {
- std::ostringstream oss;
- oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
- if (!rwx_memory_allowed) {
- // Without using RWX page permissions, the JIT can not fallback to single mapping as it
- // requires tranitioning the code pages to RWX for updates.
- *error_msg = oss.str();
- return nullptr;
+ // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
+ // for it.
+ if (!is_zygote) {
+ // Bionic supports memfd_create, but the call may fail on older kernels.
+ mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
+ if (mem_fd.get() < 0) {
+ std::ostringstream oss;
+ oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
+ if (!rwx_memory_allowed) {
+        // Without using RWX page permissions, the JIT can not fallback to single mapping as it
+        // requires transitioning the code pages to RWX for updates.
+ *error_msg = oss.str();
+ return false;
+ }
+ VLOG(jit) << oss.str();
}
- VLOG(jit) << oss.str();
}
- if (mem_fd.get() >= 0 && ftruncate(mem_fd, max_capacity) != 0) {
+ if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
std::ostringstream oss;
oss << "Failed to initialize memory file: " << strerror(errno);
*error_msg = oss.str();
- return nullptr;
+ return false;
}
- // Data cache will be half of the initial allocation.
- // Code cache will be the other half of the initial allocation.
- // TODO: Make this variable?
-
- // Align both capacities to page size, as that's the unit mspaces use.
- initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
- max_capacity = RoundDown(max_capacity, 2 * kPageSize);
- const size_t data_capacity = max_capacity / 2;
- const size_t exec_capacity = used_only_for_profile_data ? 0 : max_capacity - data_capacity;
- DCHECK_LE(data_capacity + exec_capacity, max_capacity);
+ std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
+ std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
std::string error_str;
// Map name specific for android_os_Debug.cpp accounting.
@@ -285,7 +267,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
mem_fd,
/* start= */ 0,
/* low_4gb= */ true,
- "data-code-cache",
+ data_cache_name.c_str(),
&error_str);
} else {
// Single view of JIT code cache case. Create an initial mapping of data pages large enough
@@ -304,7 +286,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// back to RX after the update.
base_flags = MAP_PRIVATE | MAP_ANON;
data_pages = MemMap::MapAnonymous(
- "data-code-cache",
+ data_cache_name.c_str(),
data_capacity + exec_capacity,
kProtRW,
/* low_4gb= */ true,
@@ -313,9 +295,9 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
if (!data_pages.IsValid()) {
std::ostringstream oss;
- oss << "Failed to create read write cache: " << error_str << " size=" << max_capacity;
+ oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
*error_msg = oss.str();
- return nullptr;
+ return false;
}
MemMap exec_pages;
@@ -326,7 +308,7 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// (for processes that cannot map WX pages). Otherwise, this region does not need to be
// executable as there is no code in the cache yet.
exec_pages = data_pages.RemapAtEnd(divider,
- "jit-code-cache",
+ exec_cache_name.c_str(),
kProtRX,
base_flags | MAP_FIXED,
mem_fd.get(),
@@ -334,21 +316,22 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
&error_str);
if (!exec_pages.IsValid()) {
std::ostringstream oss;
- oss << "Failed to create read execute code cache: " << error_str << " size=" << max_capacity;
+ oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
*error_msg = oss.str();
- return nullptr;
+ return false;
}
if (mem_fd.get() >= 0) {
// For dual view, create the secondary view of code memory used for updating code. This view
// is never executable.
+ std::string name = exec_cache_name + "-rw";
non_exec_pages = MemMap::MapFile(exec_capacity,
kProtR,
base_flags,
mem_fd,
/* start= */ data_capacity,
/* low_4GB= */ false,
- "jit-code-cache-rw",
+ name.c_str(),
&error_str);
if (!non_exec_pages.IsValid()) {
static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
@@ -357,44 +340,77 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
VLOG(jit) << kFailedNxView;
} else {
*error_msg = kFailedNxView;
- return nullptr;
+ return false;
}
}
}
} else {
// Profiling only. No memory for code required.
- DCHECK(used_only_for_profile_data);
}
- const size_t initial_data_capacity = initial_capacity / 2;
- const size_t initial_exec_capacity =
- (exec_capacity == 0) ? 0 : (initial_capacity - initial_data_capacity);
+ data_pages_ = std::move(data_pages);
+ exec_pages_ = std::move(exec_pages);
+ non_exec_pages_ = std::move(non_exec_pages);
+ return true;
+}
+
+JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
+ bool rwx_memory_allowed,
+ bool is_zygote,
+ std::string* error_msg) {
+ // Register for membarrier expedited sync core if JIT will be generating code.
+ if (!used_only_for_profile_data) {
+ if (art::membarrier(art::MembarrierCommand::kRegisterPrivateExpeditedSyncCore) != 0) {
+ // MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ensures that CPU instruction pipelines are
+ // flushed and it's used when adding code to the JIT. The memory used by the new code may
+ // have just been released and, in theory, the old code could still be in a pipeline.
+ VLOG(jit) << "Kernel does not support membarrier sync-core";
+ }
+ }
- return new JitCodeCache(
- std::move(data_pages),
- std::move(exec_pages),
- std::move(non_exec_pages),
- initial_data_capacity,
- initial_exec_capacity,
- max_capacity);
+ // Check whether the provided max capacity in options is below 1GB.
+ size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
+ // We need to have 32 bit offsets from method headers in code cache which point to things
+ // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
+ // Ensure we're below 1 GB to be safe.
+ if (max_capacity > 1 * GB) {
+ std::ostringstream oss;
+ oss << "Maxium code cache capacity is limited to 1 GB, "
+ << PrettySize(max_capacity) << " is too big";
+ *error_msg = oss.str();
+ return nullptr;
+ }
+
+ size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
+
+ std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
+
+ MutexLock mu(Thread::Current(), jit_code_cache->lock_);
+ jit_code_cache->InitializeState(initial_capacity, max_capacity);
+
+ // Zygote should never collect code to share the memory with the children.
+ if (is_zygote) {
+ jit_code_cache->SetGarbageCollectCode(false);
+ }
+
+ if (!jit_code_cache->InitializeMappings(rwx_memory_allowed, is_zygote, error_msg)) {
+ return nullptr;
+ }
+
+ jit_code_cache->InitializeSpaces();
+
+ VLOG(jit) << "Created jit code cache: initial capacity="
+ << PrettySize(initial_capacity)
+ << ", maximum capacity="
+ << PrettySize(max_capacity);
+
+ return jit_code_cache.release();
}
-JitCodeCache::JitCodeCache(MemMap&& data_pages,
- MemMap&& exec_pages,
- MemMap&& non_exec_pages,
- size_t initial_data_capacity,
- size_t initial_exec_capacity,
- size_t max_capacity)
+JitCodeCache::JitCodeCache()
: lock_("Jit code cache", kJitCodeCacheLock),
lock_cond_("Jit code cache condition variable", lock_),
collection_in_progress_(false),
- data_pages_(std::move(data_pages)),
- exec_pages_(std::move(exec_pages)),
- non_exec_pages_(std::move(non_exec_pages)),
- max_capacity_(max_capacity),
- current_capacity_(initial_exec_capacity + initial_data_capacity),
- data_end_(initial_data_capacity),
- exec_end_(initial_exec_capacity),
last_collection_increased_code_cache_(false),
garbage_collect_code_(true),
used_memory_for_data_(0),
@@ -406,10 +422,31 @@ JitCodeCache::JitCodeCache(MemMap&& data_pages,
histogram_code_memory_use_("Memory used for compiled code", 16),
histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
is_weak_access_enabled_(true),
- inline_cache_cond_("Jit inline cache condition variable", lock_) {
+ inline_cache_cond_("Jit inline cache condition variable", lock_),
+ zygote_data_pages_(),
+ zygote_exec_pages_(),
+ zygote_data_mspace_(nullptr),
+ zygote_exec_mspace_(nullptr) {
+}
+
+void JitCodeCache::InitializeState(size_t initial_capacity, size_t max_capacity) {
+ CHECK_GE(max_capacity, initial_capacity);
+ CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
+ // Align both capacities to page size, as that's the unit mspaces use.
+ initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
+ max_capacity = RoundDown(max_capacity, 2 * kPageSize);
- DCHECK_GE(max_capacity, initial_exec_capacity + initial_data_capacity);
+ data_pages_ = MemMap();
+ exec_pages_ = MemMap();
+ non_exec_pages_ = MemMap();
+ initial_capacity_ = initial_capacity;
+ max_capacity_ = max_capacity;
+ current_capacity_ = initial_capacity,
+ data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
+ exec_end_ = initial_capacity - data_end_;
+}
+void JitCodeCache::InitializeSpaces() {
// Initialize the data heap
data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
@@ -427,19 +464,14 @@ JitCodeCache::JitCodeCache(MemMap&& data_pages,
CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
- SetFootprintLimit(current_capacity_);
+ SetFootprintLimit(initial_capacity_);
// Protect pages containing heap metadata. Updates to the code heap toggle write permission to
// perform the update and there are no other times write access is required.
CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
} else {
exec_mspace_ = nullptr;
- SetFootprintLimit(current_capacity_);
+ SetFootprintLimit(initial_capacity_);
}
-
- VLOG(jit) << "Created jit code cache: initial data size="
- << PrettySize(initial_data_capacity)
- << ", initial code size="
- << PrettySize(initial_exec_capacity);
}
JitCodeCache::~JitCodeCache() {}
@@ -861,7 +893,8 @@ void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
}
}
-static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
+static void ClearMethodCounter(ArtMethod* method, bool was_warm)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (was_warm) {
method->SetPreviouslyWarm();
}
@@ -1097,7 +1130,7 @@ bool JitCodeCache::RemoveMethod(ArtMethod* method, bool release_memory) {
return false;
}
- method->ClearCounter();
+ method->SetCounter(0);
Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
method, GetQuickToInterpreterBridge());
VLOG(jit)
@@ -1272,31 +1305,6 @@ size_t JitCodeCache::ReserveData(Thread* self,
}
}
-class MarkCodeVisitor final : public StackVisitor {
- public:
- MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in, CodeCacheBitmap* bitmap)
- : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
- code_cache_(code_cache_in),
- bitmap_(bitmap) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- if (method_header == nullptr) {
- return true;
- }
- const void* code = method_header->GetCode();
- if (code_cache_->ContainsPc(code)) {
- // Use the atomic set version, as multiple threads are executing this code.
- bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
- }
- return true;
- }
-
- private:
- JitCodeCache* const code_cache_;
- CodeCacheBitmap* const bitmap_;
-};
-
class MarkCodeClosure final : public Closure {
public:
MarkCodeClosure(JitCodeCache* code_cache, CodeCacheBitmap* bitmap, Barrier* barrier)
@@ -1305,8 +1313,24 @@ class MarkCodeClosure final : public Closure {
void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK(thread == Thread::Current() || thread->IsSuspended());
- MarkCodeVisitor visitor(thread, code_cache_, bitmap_);
- visitor.WalkStack();
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) {
+ const OatQuickMethodHeader* method_header =
+ stack_visitor->GetCurrentOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return true;
+ }
+ const void* code = method_header->GetCode();
+ if (code_cache_->ContainsPc(code)) {
+ // Use the atomic set version, as multiple threads are executing this code.
+ bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
+ }
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kSkipInlinedFrames);
+
if (kIsDebugBuild) {
// The stack walking code queries the side instrumentation stack if it
// sees an instrumentation exit pc, so the JIT code of methods in that stack
@@ -1339,13 +1363,13 @@ void JitCodeCache::NotifyCollectionDone(Thread* self) {
}
void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
- size_t per_space_footprint = new_footprint / 2;
- DCHECK(IsAlignedParam(per_space_footprint, kPageSize));
- DCHECK_EQ(per_space_footprint * 2, new_footprint);
- mspace_set_footprint_limit(data_mspace_, per_space_footprint);
+ size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
+ DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
+ DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
+ mspace_set_footprint_limit(data_mspace_, data_space_footprint);
if (HasCodeMapping()) {
ScopedCodeCacheWrite scc(this);
- mspace_set_footprint_limit(exec_mspace_, per_space_footprint);
+ mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
}
}
@@ -1806,8 +1830,10 @@ void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_S
void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
std::vector<ProfileMethodInfo>& methods) {
+ Thread* self = Thread::Current();
+ WaitUntilInlineCacheAccessible(self);
+ MutexLock mu(self, lock_);
ScopedTrace trace(__FUNCTION__);
- MutexLock mu(Thread::Current(), lock_);
uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
for (const ProfilingInfo* info : profiling_infos_) {
ArtMethod* method = info->GetMethod();
@@ -2064,5 +2090,33 @@ void JitCodeCache::Dump(std::ostream& os) {
histogram_profiling_info_memory_use_.PrintMemoryUse(os);
}
+void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
+ MutexLock mu(Thread::Current(), lock_);
+ // Currently, we don't expect any compilations from zygote.
+ CHECK_EQ(number_of_compilations_, 0u);
+ CHECK_EQ(number_of_osr_compilations_, 0u);
+ CHECK(jni_stubs_map_.empty());
+ CHECK(method_code_map_.empty());
+ CHECK(osr_code_map_.empty());
+
+ zygote_data_pages_ = std::move(data_pages_);
+ zygote_exec_pages_ = std::move(exec_pages_);
+ zygote_data_mspace_ = data_mspace_;
+ zygote_exec_mspace_ = exec_mspace_;
+
+ size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
+ size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
+
+ InitializeState(initial_capacity, max_capacity);
+
+ std::string error_msg;
+ if (!InitializeMappings(/* rwx_memory_allowed= */ !is_system_server, is_zygote, &error_msg)) {
+ LOG(WARNING) << "Could not reset JIT state after zygote fork: " << error_msg;
+ return;
+ }
+
+ InitializeSpaces();
+}
+
} // namespace jit
} // namespace art
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index a5075638f2..7a838fddd6 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -89,10 +89,9 @@ class JitCodeCache {
// Create the code cache with a code + data capacity equal to "capacity", error message is passed
// in the out arg error_msg.
- static JitCodeCache* Create(size_t initial_capacity,
- size_t max_capacity,
- bool used_only_for_profile_data,
+ static JitCodeCache* Create(bool used_only_for_profile_data,
bool rwx_memory_allowed,
+ bool is_zygote,
std::string* error_msg);
~JitCodeCache();
@@ -262,14 +261,17 @@ class JitCodeCache {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void PostForkChildAction(bool is_system_server, bool is_zygote);
+
private:
- // Take ownership of maps.
- JitCodeCache(MemMap&& data_pages,
- MemMap&& exec_pages,
- MemMap&& non_exec_pages,
- size_t initial_data_capacity,
- size_t initial_exec_capacity,
- size_t max_capacity);
+ JitCodeCache();
+
+ void InitializeState(size_t initial_capacity, size_t max_capacity) REQUIRES(lock_);
+
+ bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
+ REQUIRES(lock_);
+
+ void InitializeSpaces() REQUIRES(lock_);
// Internal version of 'CommitCode' that will not retry if the
// allocation fails. Return null if the allocation fails.
@@ -421,6 +423,9 @@ class JitCodeCache {
// ProfilingInfo objects we have allocated.
std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
+ // The initial capacity in bytes this code cache starts with.
+ size_t initial_capacity_ GUARDED_BY(lock_);
+
// The maximum capacity in bytes this code cache can go to.
size_t max_capacity_ GUARDED_BY(lock_);
@@ -471,10 +476,19 @@ class JitCodeCache {
// Condition to wait on for accessing inline caches.
ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+ // Mem map which holds zygote data (stack maps and profiling info).
+ MemMap zygote_data_pages_;
+ // Mem map which holds zygote code and has executable permission.
+ MemMap zygote_exec_pages_;
+ // The opaque mspace for allocating zygote data.
+ void* zygote_data_mspace_ GUARDED_BY(lock_);
+ // The opaque mspace for allocating zygote code.
+ void* zygote_exec_mspace_ GUARDED_BY(lock_);
+
friend class art::JitJniStubTestHelper;
friend class ScopedCodeCacheWrite;
- DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
+ DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
};
} // namespace jit
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index e3248eaf24..c8d4728589 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -362,7 +362,7 @@ static void SampleClassesAndExecutedMethods(pthread_t profiler_pthread,
}
// Visit all of the methods in the class to see which ones were executed.
for (ArtMethod& method : klass->GetMethods(kRuntimePointerSize)) {
- if (!method.IsNative()) {
+ if (!method.IsNative() && !method.IsAbstract()) {
DCHECK(!method.IsProxyMethod());
const uint16_t counter = method.GetCounter();
// Mark startup methods as hot if they have more than hot_method_sample_threshold
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index df2a8e29cb..647928391b 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -277,43 +277,6 @@ void Monitor::SetObject(mirror::Object* object) {
obj_ = GcRoot<mirror::Object>(object);
}
-// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
-
-struct NthCallerWithDexPcVisitor final : public StackVisitor {
- explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_(nullptr),
- dex_pc_(0),
- current_frame_number_(0),
- wanted_frame_number_(frame) {}
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m == nullptr || m->IsRuntimeMethod()) {
- // Runtime method, upcall, or resolution issue. Skip.
- return true;
- }
-
- // Is this the requested frame?
- if (current_frame_number_ == wanted_frame_number_) {
- method_ = m;
- dex_pc_ = GetDexPc(/* abort_on_failure=*/ false);
- return false;
- }
-
- // Look for more.
- current_frame_number_++;
- return true;
- }
-
- ArtMethod* method_;
- uint32_t dex_pc_;
-
- private:
- size_t current_frame_number_;
- const size_t wanted_frame_number_;
-};
-
// This function is inlined and just helps to not have the VLOG and ATRACE check at all the
// potential tracing points.
void Monitor::AtraceMonitorLock(Thread* self, mirror::Object* obj, bool is_wait) {
@@ -326,13 +289,41 @@ void Monitor::AtraceMonitorLockImpl(Thread* self, mirror::Object* obj, bool is_w
// Wait() requires a deeper call stack to be useful. Otherwise you'll see "Waiting at
// Object.java". Assume that we'll wait a nontrivial amount, so it's OK to do a longer
// stack walk than if !is_wait.
- NthCallerWithDexPcVisitor visitor(self, is_wait ? 1U : 0U);
- visitor.WalkStack(false);
+ const size_t wanted_frame_number = is_wait ? 1U : 0U;
+
+ ArtMethod* method = nullptr;
+ uint32_t dex_pc = 0u;
+
+ size_t current_frame_number = 0u;
+ StackVisitor::WalkStack(
+ // Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ if (m == nullptr || m->IsRuntimeMethod()) {
+ // Runtime method, upcall, or resolution issue. Skip.
+ return true;
+ }
+
+ // Is this the requested frame?
+ if (current_frame_number == wanted_frame_number) {
+ method = m;
+          dex_pc = stack_visitor->GetDexPc(/* abort_on_failure= */ false);
+ return false;
+ }
+
+ // Look for more.
+ current_frame_number++;
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+
const char* prefix = is_wait ? "Waiting on " : "Locking ";
const char* filename;
int32_t line_number;
- TranslateLocation(visitor.method_, visitor.dex_pc_, &filename, &line_number);
+ TranslateLocation(method, dex_pc, &filename, &line_number);
// It would be nice to have a stable "ID" for the object here. However, the only stable thing
// would be the identity hashcode. But we cannot use IdentityHashcode here: For one, there are
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 56e9094983..530371d4c4 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -29,6 +29,7 @@
#include "debugger.h"
#include "hidden_api.h"
#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
#include "native_util.h"
@@ -292,7 +293,10 @@ static void ZygoteHooks_nativePostForkSystemServer(JNIEnv* env ATTRIBUTE_UNUSED,
// System server has a window where it can create executable pages for this purpose, but this is
// turned off after this hook. Consequently, the only JIT mode supported is the dual-view JIT
// where one mapping is R->RW and the other is RX. Single view requires RX->RWX->RX.
- Runtime::Current()->CreateJitCodeCache(/*rwx_memory_allowed=*/false);
+ if (Runtime::Current()->GetJit() != nullptr) {
+ Runtime::Current()->GetJit()->GetCodeCache()->PostForkChildAction(
+ /* is_system_server= */ true, /* is_zygote= */ false);
+ }
}
static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
@@ -332,6 +336,15 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
}
Runtime::Current()->GetHeap()->PostForkChildAction(thread);
+ if (Runtime::Current()->GetJit() != nullptr) {
+ if (!is_system_server) {
+ // System server already called the JIT cache post fork action in `nativePostForkSystemServer`.
+ Runtime::Current()->GetJit()->GetCodeCache()->PostForkChildAction(
+ /* is_system_server= */ false, is_zygote);
+ }
+ // This must be called after EnableDebugFeatures.
+ Runtime::Current()->GetJit()->PostForkChildAction();
+ }
// Update tracing.
if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) {
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 33c85973b3..29b569050c 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -586,26 +586,7 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options,
args.Set(M::BackgroundGc, BackgroundGcOption { background_collector_type_ });
}
- // If a reference to the dalvik core.jar snuck in, replace it with
- // the art specific version. This can happen with on device
- // boot.art/boot.oat generation by GenerateImage which relies on the
- // value of BOOTCLASSPATH.
-#if defined(ART_TARGET)
- std::string core_jar("/core.jar");
- std::string core_libart_jar("/core-libart.jar");
-#else
- // The host uses hostdex files.
- std::string core_jar("/core-hostdex.jar");
- std::string core_libart_jar("/core-libart-hostdex.jar");
-#endif
auto boot_class_path_string = args.GetOrDefault(M::BootClassPath);
-
- size_t core_jar_pos = boot_class_path_string.find(core_jar);
- if (core_jar_pos != std::string::npos) {
- boot_class_path_string.replace(core_jar_pos, core_jar.size(), core_libart_jar);
- args.Set(M::BootClassPath, boot_class_path_string);
- }
-
{
auto&& boot_class_path = args.GetOrDefault(M::BootClassPath);
auto&& boot_class_path_locations = args.GetOrDefault(M::BootClassPathLocations);
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index afdfefaffa..d4e3d54a99 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -154,46 +154,36 @@ class CatchBlockStackVisitor final : public StackVisitor {
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
};
-// Counts instrumentation stack frame prior to catch handler or upcall.
-class InstrumentationStackVisitor : public StackVisitor {
- public:
- InstrumentationStackVisitor(Thread* self, size_t frame_depth)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- frame_depth_(frame_depth),
- instrumentation_frames_to_pop_(0) {
- CHECK_NE(frame_depth_, kInvalidFrameDepth);
- }
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- size_t current_frame_depth = GetFrameDepth();
- if (current_frame_depth < frame_depth_) {
- CHECK(GetMethod() != nullptr);
- if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == GetReturnPc())) {
- if (!IsInInlinedFrame()) {
- // We do not count inlined frames, because we do not instrument them. The reason we
- // include them in the stack walking is the check against `frame_depth_`, which is
- // given to us by a visitor that visits inlined frames.
- ++instrumentation_frames_to_pop_;
+static size_t GetInstrumentationFramesToPop(Thread* self, size_t frame_depth)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK_NE(frame_depth, kInvalidFrameDepth);
+ size_t instrumentation_frames_to_pop = 0;
+ StackVisitor::WalkStack(
+ [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ size_t current_frame_depth = stack_visitor->GetFrameDepth();
+ if (current_frame_depth < frame_depth) {
+ CHECK(stack_visitor->GetMethod() != nullptr);
+ if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) ==
+ stack_visitor->GetReturnPc())) {
+ if (!stack_visitor->IsInInlinedFrame()) {
+ // We do not count inlined frames, because we do not instrument them. The reason we
+            // include them in the stack walking is the check against `frame_depth`, which is
+ // given to us by a visitor that visits inlined frames.
+ ++instrumentation_frames_to_pop;
+ }
+ }
+ return true;
}
- }
- return true;
- } else {
- // We reached the frame of the catch handler or the upcall.
- return false;
- }
- }
-
- size_t GetInstrumentationFramesToPop() const {
- return instrumentation_frames_to_pop_;
- }
-
- private:
- const size_t frame_depth_;
- size_t instrumentation_frames_to_pop_;
-
- DISALLOW_COPY_AND_ASSIGN(InstrumentationStackVisitor);
-};
+ // We reached the frame of the catch handler or the upcall.
+ return false;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+      /* check_suspended= */ true,
+      /* include_transitions= */ true);
+ return instrumentation_frames_to_pop;
+}
// Finds the appropriate exception catch after calling all method exit instrumentation functions.
// Note that this might change the exception being thrown.
@@ -229,9 +219,8 @@ void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) {
// Figure out how many of those frames have instrumentation we need to remove (Should be the
// exact same as number of new_pop_count if there aren't inlined frames).
- InstrumentationStackVisitor instrumentation_visitor(self_, handler_frame_depth_);
- instrumentation_visitor.WalkStack(true);
- size_t instrumentation_frames_to_pop = instrumentation_visitor.GetInstrumentationFramesToPop();
+ size_t instrumentation_frames_to_pop =
+ GetInstrumentationFramesToPop(self_, handler_frame_depth_);
if (kDebugExceptionDelivery) {
if (*handler_quick_frame_ == nullptr) {
@@ -647,10 +636,8 @@ uintptr_t QuickExceptionHandler::UpdateInstrumentationStack() {
DCHECK(is_deoptimization_) << "Non-deoptimization handlers should use FindCatch";
uintptr_t return_pc = 0;
if (method_tracing_active_) {
- InstrumentationStackVisitor visitor(self_, handler_frame_depth_);
- visitor.WalkStack(true);
-
- size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
+ size_t instrumentation_frames_to_pop =
+ GetInstrumentationFramesToPop(self_, handler_frame_depth_);
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
return_pc = instrumentation->PopFramesForDeoptimization(self_, instrumentation_frames_to_pop);
}
@@ -671,53 +658,41 @@ void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) {
UNREACHABLE();
}
-// Prints out methods with their type of frame.
-class DumpFramesWithTypeStackVisitor final : public StackVisitor {
- public:
- explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- show_details_(show_details) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* method = GetMethod();
- if (show_details_) {
- LOG(INFO) << "|> pc = " << std::hex << GetCurrentQuickFramePc();
- LOG(INFO) << "|> addr = " << std::hex << reinterpret_cast<uintptr_t>(GetCurrentQuickFrame());
- if (GetCurrentQuickFrame() != nullptr && method != nullptr) {
- LOG(INFO) << "|> ret = " << std::hex << GetReturnPc();
- }
- }
- if (method == nullptr) {
- // Transition, do go on, we want to unwind over bridges, all the way.
- if (show_details_) {
- LOG(INFO) << "N <transition>";
- }
- return true;
- } else if (method->IsRuntimeMethod()) {
- if (show_details_) {
- LOG(INFO) << "R " << method->PrettyMethod(true);
- }
- return true;
- } else {
- bool is_shadow = GetCurrentShadowFrame() != nullptr;
- LOG(INFO) << (is_shadow ? "S" : "Q")
- << ((!is_shadow && IsInInlinedFrame()) ? "i" : " ")
- << " "
- << method->PrettyMethod(true);
- return true; // Go on.
- }
- }
-
- private:
- bool show_details_;
-
- DISALLOW_COPY_AND_ASSIGN(DumpFramesWithTypeStackVisitor);
-};
-
void QuickExceptionHandler::DumpFramesWithType(Thread* self, bool details) {
- DumpFramesWithTypeStackVisitor visitor(self, details);
- visitor.WalkStack(true);
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* method = stack_visitor->GetMethod();
+ if (details) {
+ LOG(INFO) << "|> pc = " << std::hex << stack_visitor->GetCurrentQuickFramePc();
+ LOG(INFO) << "|> addr = " << std::hex
+ << reinterpret_cast<uintptr_t>(stack_visitor->GetCurrentQuickFrame());
+ if (stack_visitor->GetCurrentQuickFrame() != nullptr && method != nullptr) {
+ LOG(INFO) << "|> ret = " << std::hex << stack_visitor->GetReturnPc();
+ }
+ }
+ if (method == nullptr) {
+ // Transition, do go on, we want to unwind over bridges, all the way.
+ if (details) {
+ LOG(INFO) << "N <transition>";
+ }
+ return true;
+ } else if (method->IsRuntimeMethod()) {
+ if (details) {
+ LOG(INFO) << "R " << method->PrettyMethod(true);
+ }
+ return true;
+ } else {
+ bool is_shadow = stack_visitor->GetCurrentShadowFrame() != nullptr;
+ LOG(INFO) << (is_shadow ? "S" : "Q")
+ << ((!is_shadow && stack_visitor->IsInInlinedFrame()) ? "i" : " ")
+ << " "
+ << method->PrettyMethod(true);
+ return true; // Go on.
+ }
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
}
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f016e874ca..9cbbd4172d 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -801,6 +801,8 @@ bool Runtime::Start() {
if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
}
+ CreateJitCodeCache(/*rwx_memory_allowed=*/true);
+ CreateJit();
}
// Send the start phase event. We have to wait till here as this is when the main thread peer
@@ -904,15 +906,8 @@ void Runtime::InitNonZygoteOrPostFork(
}
}
- if (jit_ == nullptr) {
- // The system server's code cache was initialized specially. For other zygote forks or
- // processes create it now.
- if (!is_system_server) {
- CreateJitCodeCache(/*rwx_memory_allowed=*/true);
- }
- // Note that when running ART standalone (not zygote, nor zygote fork),
- // the jit may have already been created.
- CreateJit();
+ if (jit_ != nullptr) {
+ jit_->CreateThreadPool();
}
// Create the thread pools.
@@ -961,127 +956,12 @@ void Runtime::StartDaemonThreads() {
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
-// Attempts to open dex files from image(s). Given the image location, try to find the oat file
-// and open it to get the stored dex file. If the image is the first for a multi-image boot
-// classpath, go on and also open the other images.
-static bool OpenDexFilesFromImage(const std::string& image_location,
- std::vector<std::unique_ptr<const DexFile>>* dex_files,
- size_t* failures) {
- DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
-
- // Use a work-list approach, so that we can easily reuse the opening code.
- std::vector<std::string> image_locations;
- image_locations.push_back(image_location);
-
- for (size_t index = 0; index < image_locations.size(); ++index) {
- std::string system_filename;
- bool has_system = false;
- std::string cache_filename_unused;
- bool dalvik_cache_exists_unused;
- bool has_cache_unused;
- bool is_global_cache_unused;
- bool found_image = gc::space::ImageSpace::FindImageFilename(image_locations[index].c_str(),
- kRuntimeISA,
- &system_filename,
- &has_system,
- &cache_filename_unused,
- &dalvik_cache_exists_unused,
- &has_cache_unused,
- &is_global_cache_unused);
-
- if (!found_image || !has_system) {
- return false;
- }
-
- // We are falling back to non-executable use of the oat file because patching failed, presumably
- // due to lack of space.
- std::string vdex_filename =
- ImageHeader::GetVdexLocationFromImageLocation(system_filename.c_str());
- std::string oat_filename =
- ImageHeader::GetOatLocationFromImageLocation(system_filename.c_str());
- std::string oat_location =
- ImageHeader::GetOatLocationFromImageLocation(image_locations[index].c_str());
- // Note: in the multi-image case, the image location may end in ".jar," and not ".art." Handle
- // that here.
- if (android::base::EndsWith(oat_location, ".jar")) {
- oat_location.replace(oat_location.length() - 3, 3, "oat");
- }
- std::string error_msg;
-
- std::unique_ptr<VdexFile> vdex_file(VdexFile::Open(vdex_filename,
- /* writable= */ false,
- /* low_4gb= */ false,
- /* unquicken= */ false,
- &error_msg));
- if (vdex_file.get() == nullptr) {
- return false;
- }
-
- std::unique_ptr<File> file(OS::OpenFileForReading(oat_filename.c_str()));
- if (file.get() == nullptr) {
- return false;
- }
- std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
- /* writable= */ false,
- /* program_header_only= */ false,
- /* low_4gb= */ false,
- &error_msg));
- if (elf_file.get() == nullptr) {
- return false;
- }
- std::unique_ptr<const OatFile> oat_file(
- OatFile::OpenWithElfFile(/* zip_fd= */ -1,
- elf_file.release(),
- vdex_file.release(),
- oat_location,
- nullptr,
- &error_msg));
- if (oat_file == nullptr) {
- LOG(WARNING) << "Unable to use '" << oat_filename << "' because " << error_msg;
- return false;
- }
-
- for (const OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
- if (oat_dex_file == nullptr) {
- *failures += 1;
- continue;
- }
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
- if (dex_file.get() == nullptr) {
- *failures += 1;
- } else {
- dex_files->push_back(std::move(dex_file));
- }
- }
-
- if (index == 0) {
- // First file. See if this is a multi-image environment, and if so, enqueue the other images.
- const OatHeader& boot_oat_header = oat_file->GetOatHeader();
- const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
- if (boot_cp != nullptr) {
- gc::space::ImageSpace::ExtractMultiImageLocations(image_locations[0],
- boot_cp,
- &image_locations);
- }
- }
-
- Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
- }
- return true;
-}
-
-
static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
const std::vector<std::string>& dex_locations,
- const std::string& image_location,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
size_t failure_count = 0;
- if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
- return failure_count;
- }
const ArtDexFileLoader dex_file_loader;
- failure_count = 0;
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i].c_str();
const char* dex_location = dex_locations[i].c_str();
@@ -1527,10 +1407,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
if (runtime_options.Exists(Opt::BootClassPathDexList)) {
boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
} else {
- OpenDexFiles(dex_filenames,
- dex_locations,
- runtime_options.GetOrDefault(Opt::Image),
- &boot_class_path);
+ OpenDexFiles(dex_filenames, dex_locations, &boot_class_path);
}
instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
@@ -2503,16 +2380,11 @@ void Runtime::CreateJitCodeCache(bool rwx_memory_allowed) {
return;
}
- // SystemServer has execmem blocked by SELinux so can not use RWX page permissions after the
- // cache initialized.
- jit_options_->SetRWXMemoryAllowed(rwx_memory_allowed);
-
std::string error_msg;
bool profiling_only = !jit_options_->UseJitCompilation();
- jit_code_cache_.reset(jit::JitCodeCache::Create(jit_options_->GetCodeCacheInitialCapacity(),
- jit_options_->GetCodeCacheMaxCapacity(),
- profiling_only,
- jit_options_->RWXMemoryAllowed(),
+ jit_code_cache_.reset(jit::JitCodeCache::Create(profiling_only,
+ rwx_memory_allowed,
+ IsZygote(),
&error_msg));
if (jit_code_cache_.get() == nullptr) {
LOG(WARNING) << "Failed to create JIT Code Cache: " << error_msg;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 3c057f3c41..0ccc7b79bf 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -634,13 +634,6 @@ class Runtime {
void DeoptimizeBootImage();
bool IsNativeDebuggable() const {
- CHECK(!is_zygote_ || IsAotCompiler());
- return is_native_debuggable_;
- }
-
- // Note: prefer not to use this method, but the checked version above. The separation exists
- // as the runtime state may change for a zygote child.
- bool IsNativeDebuggableZygoteOK() const {
return is_native_debuggable_;
}
@@ -698,7 +691,6 @@ class Runtime {
double GetHashTableMaxLoadFactor() const;
bool IsSafeMode() const {
- CHECK(!is_zygote_);
return safe_mode_;
}
diff --git a/runtime/stack.h b/runtime/stack.h
index 02578d25b7..9d30115bb1 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -143,6 +143,36 @@ class StackVisitor {
template <CountTransitions kCount = CountTransitions::kYes>
void WalkStack(bool include_transitions = false) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Convenience helper function to walk the stack with a lambda as a visitor.
+ template <CountTransitions kCountTransitions = CountTransitions::kYes,
+ typename T>
+ ALWAYS_INLINE static void WalkStack(const T& fn,
+ Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended = true,
+ bool include_transitions = false)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ class LambdaStackVisitor : public StackVisitor {
+ public:
+ LambdaStackVisitor(const T& fn,
+ Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended = true)
+ : StackVisitor(thread, context, walk_kind, check_suspended), fn_(fn) {}
+
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+ return fn_(this);
+ }
+
+ private:
+ T fn_;
+ };
+ LambdaStackVisitor visitor(fn, thread, context, walk_kind, check_suspended);
+ visitor.template WalkStack<kCountTransitions>(include_transitions);
+ }
+
Thread* GetThread() const {
return thread_;
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e9fed76d6f..33cd9bbb67 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3607,42 +3607,34 @@ Context* Thread::GetLongJumpContext() {
return result;
}
-ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
+ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out,
bool check_suspended,
bool abort_on_error) const {
// Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java
// stack.
- struct CurrentMethodVisitor final : public StackVisitor {
- CurrentMethodVisitor(Thread* thread, bool check_suspended, bool abort_on_error)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread,
- /* context= */nullptr,
- StackVisitor::StackWalkKind::kIncludeInlinedFrames,
- check_suspended),
- method_(nullptr),
- dex_pc_(0),
- abort_on_error_(abort_on_error) {}
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- if (m->IsRuntimeMethod()) {
- // Continue if this is a runtime method.
- return true;
- }
- method_ = m;
- dex_pc_ = GetDexPc(abort_on_error_);
- return false;
- }
- ArtMethod* method_;
- uint32_t dex_pc_;
- const bool abort_on_error_;
- };
- CurrentMethodVisitor visitor(const_cast<Thread*>(this), check_suspended, abort_on_error);
- visitor.WalkStack(false);
- if (dex_pc != nullptr) {
- *dex_pc = visitor.dex_pc_;
+ ArtMethod* method = nullptr;
+ uint32_t dex_pc = dex::kDexNoIndex;
+ StackVisitor::WalkStack(
+ [&](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = visitor->GetMethod();
+ if (m->IsRuntimeMethod()) {
+ // Continue if this is a runtime method.
+ return true;
+ }
+ method = m;
+ dex_pc = visitor->GetDexPc(abort_on_error);
+ return false;
+ },
+ const_cast<Thread*>(this),
+ /* context= */ nullptr,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended);
+
+ if (dex_pc_out != nullptr) {
+ *dex_pc_out = dex_pc;
}
- return visitor.method_;
+ return method;
}
bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index ad58c2ea99..f6c36cf989 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -58,32 +58,6 @@ static constexpr uint8_t kOpNewMethod = 1U;
static constexpr uint8_t kOpNewThread = 2U;
static constexpr uint8_t kOpTraceSummary = 3U;
-class BuildStackTraceVisitor : public StackVisitor {
- public:
- explicit BuildStackTraceVisitor(Thread* thread)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_trace_(Trace::AllocStackTrace()) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- // Ignore runtime frames (in particular callee save).
- if (!m->IsRuntimeMethod()) {
- method_trace_->push_back(m);
- }
- return true;
- }
-
- // Returns a stack trace where the topmost frame corresponds with the first element of the vector.
- std::vector<ArtMethod*>* GetStackTrace() const {
- return method_trace_;
- }
-
- private:
- std::vector<ArtMethod*>* const method_trace_;
-
- DISALLOW_COPY_AND_ASSIGN(BuildStackTraceVisitor);
-};
-
static const char kTraceTokenChar = '*';
static const uint16_t kTraceHeaderLength = 32;
static const uint32_t kTraceMagicValue = 0x574f4c53;
@@ -228,9 +202,19 @@ static void Append8LE(uint8_t* buf, uint64_t val) {
}
static void GetSample(Thread* thread, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
- BuildStackTraceVisitor build_trace_visitor(thread);
- build_trace_visitor.WalkStack();
- std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
+ std::vector<ArtMethod*>* const stack_trace = Trace::AllocStackTrace();
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ // Ignore runtime frames (in particular callee save).
+ if (!m->IsRuntimeMethod()) {
+ stack_trace->push_back(m);
+ }
+ return true;
+ },
+ thread,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
Trace* the_trace = reinterpret_cast<Trace*>(arg);
the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index bd59e73192..f24711a4a1 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -31,7 +31,6 @@
#include "dex/class_accessor-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
-#include "dex/hidden_api_access_flags.h"
#include "dex_to_dex_decompiler.h"
#include "quicken_info.h"
diff --git a/test/021-string2/src/Main.java b/test/021-string2/src/Main.java
index c713aa43a6..141a08983b 100644
--- a/test/021-string2/src/Main.java
+++ b/test/021-string2/src/Main.java
@@ -15,14 +15,13 @@
*/
import junit.framework.Assert;
-import java.lang.reflect.Method;
import java.util.Locale;
/**
* more string tests
*/
public class Main {
- public static void main(String args[]) throws Exception {
+ public static void main(String args[]) {
String test = "0123456789";
String test1 = new String("0123456789"); // different object
String test2 = new String("0123456780"); // different value
@@ -86,9 +85,7 @@ public class Main {
Assert.assertEquals("this is a path", test.replaceAll("/", " "));
Assert.assertEquals("this is a path", test.replace("/", " "));
- Class<?> Strings = Class.forName("com.android.org.bouncycastle.util.Strings");
- Method fromUTF8ByteArray = Strings.getDeclaredMethod("fromUTF8ByteArray", byte[].class);
- String result = (String) fromUTF8ByteArray.invoke(null, new byte[] {'O', 'K'});
+ String result = new String(new char[] { 'O', 'K' });
System.out.println(result);
testCompareToAndEquals();
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index ddc86dffa4..817a647805 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -25,62 +25,50 @@ namespace art {
namespace {
-class TestVisitor : public StackVisitor {
- public:
- TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- this_value_(this_value),
- found_method_index_(0) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name.compare("$noinline$testThisWithInstanceCall") == 0) {
- found_method_index_ = 1;
- uint32_t value = 0;
- CHECK(GetVReg(m, 1, kReferenceVReg, &value));
- CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
- CHECK_EQ(GetThisObject(), this_value_);
- } else if (m_name.compare("$noinline$testThisWithStaticCall") == 0) {
- found_method_index_ = 2;
- uint32_t value = 0;
- CHECK(GetVReg(m, 1, kReferenceVReg, &value));
- } else if (m_name.compare("$noinline$testParameter") == 0) {
- found_method_index_ = 3;
- uint32_t value = 0;
- CHECK(GetVReg(m, 1, kReferenceVReg, &value));
- } else if (m_name.compare("$noinline$testObjectInScope") == 0) {
- found_method_index_ = 4;
- uint32_t value = 0;
- CHECK(GetVReg(m, 0, kReferenceVReg, &value));
- }
-
- return true;
- }
+jint FindMethodIndex(jobject this_value_jobj) {
+ ScopedObjectAccess soa(Thread::Current());
+ std::unique_ptr<Context> context(Context::Create());
+ ObjPtr<mirror::Object> this_value = soa.Decode<mirror::Object>(this_value_jobj);
+ jint found_method_index = 0;
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ std::string m_name(m->GetName());
- mirror::Object* this_value_;
+ if (m_name.compare("$noinline$testThisWithInstanceCall") == 0) {
+ found_method_index = 1;
+ uint32_t value = 0;
+ CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value);
+ CHECK_EQ(stack_visitor->GetThisObject(), this_value);
+ } else if (m_name.compare("$noinline$testThisWithStaticCall") == 0) {
+ found_method_index = 2;
+ uint32_t value = 0;
+ CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+ } else if (m_name.compare("$noinline$testParameter") == 0) {
+ found_method_index = 3;
+ uint32_t value = 0;
+ CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+ } else if (m_name.compare("$noinline$testObjectInScope") == 0) {
+ found_method_index = 4;
+ uint32_t value = 0;
+ CHECK(stack_visitor->GetVReg(m, 0, kReferenceVReg, &value));
+ }
- // Value returned to Java to ensure the methods testSimpleVReg and testPairVReg
- // have been found and tested.
- jint found_method_index_;
-};
+ return true;
+ },
+ soa.Self(),
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return found_method_index;
+}
extern "C" JNIEXPORT jint JNICALL Java_Main_doNativeCallRef(JNIEnv*, jobject value) {
- ScopedObjectAccess soa(Thread::Current());
- std::unique_ptr<Context> context(Context::Create());
- TestVisitor visitor(soa.Self(), context.get(), soa.Decode<mirror::Object>(value).Ptr());
- visitor.WalkStack();
- return visitor.found_method_index_;
+ return FindMethodIndex(value);
}
extern "C" JNIEXPORT jint JNICALL Java_Main_doStaticNativeCallRef(JNIEnv*, jclass) {
- ScopedObjectAccess soa(Thread::Current());
- std::unique_ptr<Context> context(Context::Create());
- TestVisitor visitor(soa.Self(), context.get(), nullptr);
- visitor.WalkStack();
- return visitor.found_method_index_;
+ return FindMethodIndex(nullptr);
}
} // namespace
diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc
index 165f5bf412..1885f8d9f5 100644
--- a/test/543-env-long-ref/env_long_ref.cc
+++ b/test/543-env-long-ref/env_long_ref.cc
@@ -23,44 +23,28 @@
namespace art {
-namespace {
-
-class TestVisitor : public StackVisitor {
- public:
- TestVisitor(const ScopedObjectAccess& soa, Context* context, jobject expected_value)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(soa.Self(), context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- expected_value_(expected_value),
- found_(false),
- soa_(soa) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name == "testCase") {
- found_ = true;
- uint32_t value = 0;
- CHECK(GetVReg(m, 1, kReferenceVReg, &value));
- CHECK_EQ(reinterpret_cast<mirror::Object*>(value),
- soa_.Decode<mirror::Object>(expected_value_).Ptr());
- }
- return true;
- }
-
- jobject expected_value_;
- bool found_;
- const ScopedObjectAccess& soa_;
-};
-
-} // namespace
-
extern "C" JNIEXPORT void JNICALL Java_Main_lookForMyRegisters(JNIEnv*, jclass, jobject value) {
ScopedObjectAccess soa(Thread::Current());
std::unique_ptr<Context> context(Context::Create());
- TestVisitor visitor(soa, context.get(), value);
- visitor.WalkStack();
- CHECK(visitor.found_);
+ bool found = false;
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ std::string m_name(m->GetName());
+
+ if (m_name == "testCase") {
+ found = true;
+ uint32_t stack_value = 0;
+ CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &stack_value));
+ CHECK_EQ(reinterpret_cast<mirror::Object*>(stack_value),
+ soa.Decode<mirror::Object>(value).Ptr());
+ }
+ return true;
+ },
+ soa.Self(),
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ CHECK(found);
}
} // namespace art
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index 7b88842fcc..b2b363447f 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -23,39 +23,33 @@
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "stack_map.h"
+#include "thread-current-inl.h"
namespace art {
-class OsrVisitor : public StackVisitor {
- public:
- explicit OsrVisitor(Thread* thread, const char* method_name)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_name_(method_name),
- in_osr_method_(false),
- in_interpreter_(false) {}
+namespace {
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name.compare(method_name_) == 0) {
- const OatQuickMethodHeader* header =
- Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
- if (header != nullptr && header == GetCurrentOatQuickMethodHeader()) {
- in_osr_method_ = true;
- } else if (IsShadowFrame()) {
- in_interpreter_ = true;
- }
- return false;
- }
- return true;
- }
+template <typename Handler>
+void ProcessMethodWithName(JNIEnv* env, jstring method_name, const Handler& handler) {
+ ScopedUtfChars chars(env, method_name);
+ CHECK(chars.c_str() != nullptr);
+ ScopedObjectAccess soa(Thread::Current());
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::string m_name(stack_visitor->GetMethod()->GetName());
+
+ if (m_name.compare(chars.c_str()) == 0) {
+ handler(stack_visitor);
+ return false;
+ }
+ return true;
+ },
+ soa.Self(),
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+}
- const char* const method_name_;
- bool in_osr_method_;
- bool in_interpreter_;
-};
+} // namespace
extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInOsrCode(JNIEnv* env,
jclass,
@@ -65,12 +59,19 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInOsrCode(JNIEnv* env,
// Just return true for non-jit configurations to stop the infinite loop.
return JNI_TRUE;
}
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- OsrVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
- return visitor.in_osr_method_;
+ bool in_osr_code = false;
+ ProcessMethodWithName(
+ env,
+ method_name,
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ const OatQuickMethodHeader* header =
+ Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
+ if (header != nullptr && header == stack_visitor->GetCurrentOatQuickMethodHeader()) {
+ in_osr_code = true;
+ }
+ });
+ return in_osr_code;
}
extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInInterpreter(JNIEnv* env,
@@ -80,86 +81,56 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInInterpreter(JNIEnv* env,
// The return value is irrelevant if we're not using JIT.
return false;
}
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- OsrVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
- return visitor.in_interpreter_;
+ bool in_interpreter = false;
+ ProcessMethodWithName(
+ env,
+ method_name,
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ const OatQuickMethodHeader* header =
+ Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
+ if ((header == nullptr || header != stack_visitor->GetCurrentOatQuickMethodHeader()) &&
+ stack_visitor->IsShadowFrame()) {
+ in_interpreter = true;
+ }
+ });
+ return in_interpreter;
}
-class ProfilingInfoVisitor : public StackVisitor {
- public:
- explicit ProfilingInfoVisitor(Thread* thread, const char* method_name)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_name_(method_name) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- if (m_name.compare(method_name_) == 0) {
- ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
- return false;
- }
- return true;
- }
-
- const char* const method_name_;
-};
-
extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasProfilingInfo(JNIEnv* env,
jclass,
jstring method_name) {
if (!Runtime::Current()->UseJitCompilation()) {
return;
}
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- ProfilingInfoVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
+ ProcessMethodWithName(
+ env,
+ method_name,
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
+ });
}
-class OsrCheckVisitor : public StackVisitor {
- public:
- OsrCheckVisitor(Thread* thread, const char* method_name)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- method_name_(method_name) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = GetMethod();
- std::string m_name(m->GetName());
-
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (m_name.compare(method_name_) == 0) {
- while (jit->GetCodeCache()->LookupOsrMethodHeader(m) == nullptr) {
- // Sleep to yield to the compiler thread.
- usleep(1000);
- // Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(m, Thread::Current(), /* osr */ true);
- }
- return false;
- }
- return true;
- }
-
- const char* const method_name_;
-};
-
extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv* env,
jclass,
jstring method_name) {
if (!Runtime::Current()->UseJitCompilation()) {
return;
}
- ScopedUtfChars chars(env, method_name);
- CHECK(chars.c_str() != nullptr);
- ScopedObjectAccess soa(Thread::Current());
- OsrCheckVisitor visitor(soa.Self(), chars.c_str());
- visitor.WalkStack();
+ ProcessMethodWithName(
+ env,
+ method_name,
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = stack_visitor->GetMethod();
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ while (jit->GetCodeCache()->LookupOsrMethodHeader(m) == nullptr) {
+ // Sleep to yield to the compiler thread.
+ usleep(1000);
+ // Will either ensure it's compiled or do the compilation itself.
+ jit->CompileMethod(m, Thread::Current(), /* osr */ true);
+ }
+ });
}
} // namespace art
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
index d11aa579e5..246426352f 100644
--- a/test/674-hiddenapi/hiddenapi.cc
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -30,7 +30,8 @@ namespace Test674HiddenApi {
extern "C" JNIEXPORT void JNICALL Java_Main_init(JNIEnv*, jclass) {
Runtime* runtime = Runtime::Current();
runtime->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
- runtime->SetTargetSdkVersion(static_cast<uint32_t>(SdkVersion::kO_MR1));
+ runtime->SetTargetSdkVersion(
+ static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()));
runtime->SetDedupeHiddenApiWarnings(false);
}
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index 581aa74d4e..393e773275 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -66,42 +66,30 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedAt(JNIEnv* env,
// public static native boolean isInterpretedFunction(String smali);
-// TODO Remove 'allow_runtime_frames' option once we have deoptimization through runtime frames.
-struct MethodIsInterpretedVisitor : public StackVisitor {
- public:
- MethodIsInterpretedVisitor(Thread* thread, ArtMethod* goal, bool require_deoptable)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- goal_(goal),
- method_is_interpreted_(true),
- method_found_(false),
- prev_was_runtime_(true),
- require_deoptable_(require_deoptable) {}
-
- bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
- if (goal_ == GetMethod()) {
- method_is_interpreted_ = (require_deoptable_ && prev_was_runtime_) || IsShadowFrame();
- method_found_ = true;
- return false;
- }
- prev_was_runtime_ = GetMethod()->IsRuntimeMethod();
- return true;
- }
-
- bool IsInterpreted() {
- return method_is_interpreted_;
- }
-
- bool IsFound() {
- return method_found_;
- }
-
- private:
- const ArtMethod* goal_;
- bool method_is_interpreted_;
- bool method_found_;
- bool prev_was_runtime_;
- bool require_deoptable_;
-};
+static bool IsMethodInterpreted(Thread* self,
+ const ArtMethod* goal,
+ const bool require_deoptable,
+ /* out */ bool* method_is_interpreted)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ *method_is_interpreted = true;
+ bool method_found = false;
+ bool prev_was_runtime = true;
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (goal == stack_visitor->GetMethod()) {
+ *method_is_interpreted =
+ (require_deoptable && prev_was_runtime) || stack_visitor->IsShadowFrame();
+ method_found = true;
+ return false;
+ }
+ prev_was_runtime = stack_visitor->GetMethod()->IsRuntimeMethod();
+ return true;
+ },
+ self,
+ /* context= */ nullptr,
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return method_found;
+}
// TODO Remove 'require_deoptimizable' option once we have deoptimization through runtime frames.
extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedFunction(
@@ -119,23 +107,18 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInterpretedFunction(
env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to interpret method argument!");
return JNI_FALSE;
}
- bool result;
- bool found;
{
ScopedObjectAccess soa(env);
ArtMethod* goal = jni::DecodeArtMethod(id);
- MethodIsInterpretedVisitor v(soa.Self(), goal, require_deoptimizable);
- v.WalkStack();
+ bool is_interpreted;
+ if (!IsMethodInterpreted(soa.Self(), goal, require_deoptimizable, &is_interpreted)) {
+ env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to find given method in stack!");
+ return JNI_FALSE;
+ }
bool enters_interpreter = Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(
goal->GetEntryPointFromQuickCompiledCode());
- result = (v.IsInterpreted() || enters_interpreter);
- found = v.IsFound();
- }
- if (!found) {
- env->ThrowNew(env->FindClass("java/lang/Error"), "Unable to find given method in stack!");
- return JNI_FALSE;
+ return (is_interpreted || enters_interpreter);
}
- return result;
}
// public static native void assertIsInterpreted();
@@ -196,24 +179,24 @@ extern "C" JNIEXPORT void JNICALL Java_Main_assertCallerIsManaged(JNIEnv* env, j
}
}
-struct GetCallingFrameVisitor : public StackVisitor {
- GetCallingFrameVisitor(Thread* thread, Context* context)
- REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
-
- bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
- // Discard stubs and Main.getThisOfCaller.
- return GetMethod() == nullptr || GetMethod()->IsNative();
- }
-};
-
extern "C" JNIEXPORT jobject JNICALL Java_Main_getThisOfCaller(
JNIEnv* env, jclass cls ATTRIBUTE_UNUSED) {
ScopedObjectAccess soa(env);
std::unique_ptr<art::Context> context(art::Context::Create());
- GetCallingFrameVisitor visitor(soa.Self(), context.get());
- visitor.WalkStack();
- return soa.AddLocalReference<jobject>(visitor.GetThisObject());
+ jobject result = nullptr;
+ StackVisitor::WalkStack(
+ [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Discard stubs and Main.getThisOfCaller.
+ if (stack_visitor->GetMethod() == nullptr || stack_visitor->GetMethod()->IsNative()) {
+ return true;
+ }
+ result = soa.AddLocalReference<jobject>(stack_visitor->GetThisObject());
+ return false;
+ },
+ soa.Self(),
+ context.get(),
+ art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
+ return result;
}
} // namespace art
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 2f016e9d8a..7f4c546df5 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -23,14 +23,14 @@
#include "android-base/strings.h"
#include "base/bit_utils.h"
-#include "base/stl_util.h"
+#include "base/hiddenapi_flags.h"
#include "base/mem_map.h"
#include "base/os.h"
+#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex/art_dex_file_loader.h"
#include "dex/class_accessor-inl.h"
#include "dex/dex_file-inl.h"
-#include "dex/hidden_api_access_flags.h"
namespace art {
@@ -601,7 +601,7 @@ class HiddenapiClassDataBuilder final {
// between BeginClassDef and EndClassDef in the order of appearance of
// fields/methods in the class data stream.
void WriteFlags(hiddenapi::ApiList flags) {
- uint32_t uint_flags = static_cast<uint32_t>(flags);
+ uint32_t uint_flags = flags.GetIntValue();
EncodeUnsignedLeb128(&data_, uint_flags);
class_def_has_non_zero_flags_ |= (uint_flags != 0u);
}
@@ -935,9 +935,9 @@ class HiddenApi final {
// Load dex signatures.
std::map<std::string, hiddenapi::ApiList> api_list;
- OpenApiFile(light_greylist_path_, api_list, hiddenapi::ApiList::kLightGreylist);
- OpenApiFile(dark_greylist_path_, api_list, hiddenapi::ApiList::kDarkGreylist);
- OpenApiFile(blacklist_path_, api_list, hiddenapi::ApiList::kBlacklist);
+ OpenApiFile(light_greylist_path_, api_list, hiddenapi::ApiList::Greylist());
+ OpenApiFile(dark_greylist_path_, api_list, hiddenapi::ApiList::GreylistMaxO());
+ OpenApiFile(blacklist_path_, api_list, hiddenapi::ApiList::Blacklist());
// Iterate over input dex files and insert HiddenapiClassData sections.
for (size_t i = 0; i < boot_dex_paths_.size(); ++i) {
@@ -957,7 +957,7 @@ class HiddenApi final {
// TODO: Load whitelist and CHECK that entry was found.
auto it = api_list.find(boot_member.GetApiEntry());
builder.WriteFlags(
- (it == api_list.end()) ? hiddenapi::ApiList::kWhitelist : it->second);
+ (it == api_list.end()) ? hiddenapi::ApiList::Whitelist() : it->second);
};
auto fn_field = [&](const ClassAccessor::Field& boot_field) {
fn_shared(DexMember(boot_class, boot_field));
@@ -988,7 +988,8 @@ class HiddenApi final {
for (std::string line; std::getline(api_file, line);) {
CHECK(api_list.find(line) == api_list.end())
- << "Duplicate entry: " << line << " (" << api_list[line] << " and " << membership << ")";
+ << "Duplicate entry: " << line << " (" << api_list.find(line)->second
+ << " and " << membership << ")";
api_list.emplace(line, membership);
}
api_file.close();
diff --git a/tools/hiddenapi/hiddenapi_test.cc b/tools/hiddenapi/hiddenapi_test.cc
index 0010b78436..66ce2de5c5 100644
--- a/tools/hiddenapi/hiddenapi_test.cc
+++ b/tools/hiddenapi/hiddenapi_test.cc
@@ -132,7 +132,7 @@ class HiddenApiTest : public CommonRuntimeTest {
CHECK(accessor.HasClassData()) << "Class " << accessor.GetDescriptor() << " has no data";
if (!accessor.HasHiddenapiClassData()) {
- return hiddenapi::ApiList::kWhitelist;
+ return hiddenapi::ApiList::Whitelist();
}
for (const ClassAccessor::Field& field : accessor.GetFields()) {
@@ -141,7 +141,7 @@ class HiddenApiTest : public CommonRuntimeTest {
const uint32_t actual_visibility = field.GetAccessFlags() & kAccVisibilityFlags;
CHECK_EQ(actual_visibility, expected_visibility)
<< "Field " << name << " in class " << accessor.GetDescriptor();
- return static_cast<hiddenapi::ApiList>(field.GetHiddenapiFlags());
+ return hiddenapi::ApiList::FromDexFlags(field.GetHiddenapiFlags());
}
}
@@ -159,7 +159,7 @@ class HiddenApiTest : public CommonRuntimeTest {
CHECK(accessor.HasClassData()) << "Class " << accessor.GetDescriptor() << " has no data";
if (!accessor.HasHiddenapiClassData()) {
- return hiddenapi::ApiList::kWhitelist;
+ return hiddenapi::ApiList::Whitelist();
}
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
@@ -170,7 +170,7 @@ class HiddenApiTest : public CommonRuntimeTest {
const uint32_t actual_visibility = method.GetAccessFlags() & kAccVisibilityFlags;
CHECK_EQ(actual_visibility, expected_visibility)
<< "Method " << name << " in class " << accessor.GetDescriptor();
- return static_cast<hiddenapi::ApiList>(method.GetHiddenapiFlags());
+ return hiddenapi::ApiList::FromDexFlags(method.GetHiddenapiFlags());
}
}
@@ -224,7 +224,7 @@ TEST_F(HiddenApiTest, InstanceFieldNoMatch) {
OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kWhitelist, GetIFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetIFieldHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceFieldLightGreylistMatch) {
@@ -234,7 +234,7 @@ TEST_F(HiddenApiTest, InstanceFieldLightGreylistMatch) {
OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kLightGreylist, GetIFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetIFieldHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceFieldDarkGreylistMatch) {
@@ -244,7 +244,7 @@ TEST_F(HiddenApiTest, InstanceFieldDarkGreylistMatch) {
OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kDarkGreylist, GetIFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetIFieldHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceFieldBlacklistMatch) {
@@ -254,7 +254,7 @@ TEST_F(HiddenApiTest, InstanceFieldBlacklistMatch) {
OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetIFieldHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch1) {
@@ -291,7 +291,7 @@ TEST_F(HiddenApiTest, StaticFieldNoMatch) {
OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kWhitelist, GetSFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetSFieldHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticFieldLightGreylistMatch) {
@@ -301,7 +301,7 @@ TEST_F(HiddenApiTest, StaticFieldLightGreylistMatch) {
OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kLightGreylist, GetSFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetSFieldHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticFieldDarkGreylistMatch) {
@@ -311,7 +311,7 @@ TEST_F(HiddenApiTest, StaticFieldDarkGreylistMatch) {
OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kDarkGreylist, GetSFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetSFieldHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticFieldBlacklistMatch) {
@@ -321,7 +321,7 @@ TEST_F(HiddenApiTest, StaticFieldBlacklistMatch) {
OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetSFieldHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticFieldTwoListsMatch1) {
@@ -358,7 +358,7 @@ TEST_F(HiddenApiTest, InstanceMethodNoMatch) {
OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kWhitelist, GetIMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetIMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceMethodLightGreylistMatch) {
@@ -368,7 +368,7 @@ TEST_F(HiddenApiTest, InstanceMethodLightGreylistMatch) {
OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kLightGreylist, GetIMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetIMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceMethodDarkGreylistMatch) {
@@ -378,7 +378,7 @@ TEST_F(HiddenApiTest, InstanceMethodDarkGreylistMatch) {
OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kDarkGreylist, GetIMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetIMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceMethodBlacklistMatch) {
@@ -388,7 +388,7 @@ TEST_F(HiddenApiTest, InstanceMethodBlacklistMatch) {
OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetIMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch1) {
@@ -425,7 +425,7 @@ TEST_F(HiddenApiTest, StaticMethodNoMatch) {
OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kWhitelist, GetSMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetSMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticMethodLightGreylistMatch) {
@@ -435,7 +435,7 @@ TEST_F(HiddenApiTest, StaticMethodLightGreylistMatch) {
OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kLightGreylist, GetSMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetSMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticMethodDarkGreylistMatch) {
@@ -445,7 +445,7 @@ TEST_F(HiddenApiTest, StaticMethodDarkGreylistMatch) {
OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kDarkGreylist, GetSMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetSMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticMethodBlacklistMatch) {
@@ -455,7 +455,7 @@ TEST_F(HiddenApiTest, StaticMethodBlacklistMatch) {
OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetSMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticMethodTwoListsMatch1) {
@@ -492,7 +492,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodNoMatch) {
OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kWhitelist, GetINMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetINMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceNativeMethodLightGreylistMatch) {
@@ -502,7 +502,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodLightGreylistMatch) {
OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kLightGreylist, GetINMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetINMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceNativeMethodDarkGreylistMatch) {
@@ -512,7 +512,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodDarkGreylistMatch) {
OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kDarkGreylist, GetINMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetINMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceNativeMethodBlacklistMatch) {
@@ -522,7 +522,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodBlacklistMatch) {
OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetINMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch1) {
@@ -559,7 +559,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodNoMatch) {
OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kWhitelist, GetSNMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Whitelist(), GetSNMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticNativeMethodLightGreylistMatch) {
@@ -569,7 +569,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodLightGreylistMatch) {
OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kLightGreylist, GetSNMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Greylist(), GetSNMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticNativeMethodDarkGreylistMatch) {
@@ -579,7 +579,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodDarkGreylistMatch) {
OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kDarkGreylist, GetSNMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::GreylistMaxO(), GetSNMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticNativeMethodBlacklistMatch) {
@@ -589,7 +589,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodBlacklistMatch) {
OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
ASSERT_NE(dex_file.get(), nullptr);
- ASSERT_EQ(hiddenapi::ApiList::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(hiddenapi::ApiList::Blacklist(), GetSNMethodHiddenFlags(*dex_file));
}
TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch1) {
diff --git a/tools/veridex/hidden_api.h b/tools/veridex/hidden_api.h
index da9f058cb4..b58332bf52 100644
--- a/tools/veridex/hidden_api.h
+++ b/tools/veridex/hidden_api.h
@@ -17,7 +17,7 @@
#ifndef ART_TOOLS_VERIDEX_HIDDEN_API_H_
#define ART_TOOLS_VERIDEX_HIDDEN_API_H_
-#include "dex/hidden_api_access_flags.h"
+#include "base/hiddenapi_flags.h"
#include "dex/method_reference.h"
#include <ostream>
@@ -45,20 +45,20 @@ class HiddenApi {
hiddenapi::ApiList GetApiList(const std::string& name) const {
if (IsInList(name, blacklist_)) {
- return hiddenapi::ApiList::kBlacklist;
+ return hiddenapi::ApiList::Blacklist();
} else if (IsInList(name, dark_greylist_)) {
- return hiddenapi::ApiList::kDarkGreylist;
+ return hiddenapi::ApiList::GreylistMaxO();
} else if (IsInList(name, light_greylist_)) {
- return hiddenapi::ApiList::kLightGreylist;
+ return hiddenapi::ApiList::Greylist();
} else if (IsInList(name, whitelist_)) {
- return hiddenapi::ApiList::kWhitelist;
+ return hiddenapi::ApiList::Whitelist();
} else {
- return hiddenapi::ApiList::kNoList;
+ return hiddenapi::ApiList::Invalid();
}
}
bool IsInAnyList(const std::string& name) const {
- return GetApiList(name) != hiddenapi::ApiList::kNoList;
+ return GetApiList(name).IsValid();
}
static std::string GetApiMethodName(const DexFile& dex_file, uint32_t method_index);
@@ -92,7 +92,7 @@ struct HiddenApiStats {
uint32_t count = 0;
uint32_t reflection_count = 0;
uint32_t linking_count = 0;
- uint32_t api_counts[5] = { 0, 0, 0, 0, 0 };
+ uint32_t api_counts[hiddenapi::ApiList::kValueCount] = {}; // initialize all to zero
};
} // namespace art
diff --git a/tools/veridex/hidden_api_finder.cc b/tools/veridex/hidden_api_finder.cc
index e24d151069..3cd7c95a23 100644
--- a/tools/veridex/hidden_api_finder.cc
+++ b/tools/veridex/hidden_api_finder.cc
@@ -180,7 +180,7 @@ void HiddenApiFinder::Dump(std::ostream& os,
for (const std::pair<const std::string,
std::vector<MethodReference>>& pair : method_locations_) {
hiddenapi::ApiList api_list = hidden_api_.GetApiList(pair.first);
- stats->api_counts[static_cast<unsigned>(api_list)]++;
+ stats->api_counts[api_list.GetIntValue()]++;
os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
os << std::endl;
HiddenApiFinder::DumpReferences(os, pair.second);
@@ -191,7 +191,7 @@ void HiddenApiFinder::Dump(std::ostream& os,
for (const std::pair<const std::string,
std::vector<MethodReference>>& pair : field_locations_) {
hiddenapi::ApiList api_list = hidden_api_.GetApiList(pair.first);
- stats->api_counts[static_cast<unsigned>(api_list)]++;
+ stats->api_counts[api_list.GetIntValue()]++;
os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
os << std::endl;
HiddenApiFinder::DumpReferences(os, pair.second);
@@ -204,8 +204,8 @@ void HiddenApiFinder::Dump(std::ostream& os,
for (const std::string& name : strings_) {
std::string full_name = cls + "->" + name;
hiddenapi::ApiList api_list = hidden_api_.GetApiList(full_name);
- stats->api_counts[static_cast<unsigned>(api_list)]++;
- if (api_list != hiddenapi::ApiList::kNoList) {
+ if (api_list.IsValid()) {
+ stats->api_counts[api_list.GetIntValue()]++;
stats->reflection_count++;
os << "#" << ++stats->count << ": Reflection " << api_list << " " << full_name
<< " potential use(s):";
diff --git a/tools/veridex/precise_hidden_api_finder.cc b/tools/veridex/precise_hidden_api_finder.cc
index 6aef89f7ee..be99ed29d4 100644
--- a/tools/veridex/precise_hidden_api_finder.cc
+++ b/tools/veridex/precise_hidden_api_finder.cc
@@ -91,8 +91,7 @@ void PreciseHiddenApiFinder::Dump(std::ostream& os, HiddenApiStats* stats) {
std::string cls(info.cls.ToString());
std::string name(info.name.ToString());
std::string full_name = cls + "->" + name;
- hiddenapi::ApiList api_list = hidden_api_.GetApiList(full_name);
- if (api_list != hiddenapi::ApiList::kNoList) {
+ if (hidden_api_.IsInAnyList(full_name)) {
named_uses[full_name].push_back(ref);
}
}
@@ -102,7 +101,7 @@ void PreciseHiddenApiFinder::Dump(std::ostream& os, HiddenApiStats* stats) {
++stats->reflection_count;
const std::string& full_name = it.first;
hiddenapi::ApiList api_list = hidden_api_.GetApiList(full_name);
- stats->api_counts[static_cast<unsigned>(api_list)]++;
+ stats->api_counts[api_list.GetIntValue()]++;
os << "#" << ++stats->count << ": Reflection " << api_list << " " << full_name << " use(s):";
os << std::endl;
for (const MethodReference& ref : it.second) {
diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc
index 179e391219..2787950323 100644
--- a/tools/veridex/veridex.cc
+++ b/tools/veridex/veridex.cc
@@ -271,17 +271,17 @@ class Veridex {
const VeridexOptions& options) {
static const char* kPrefix = " ";
if (options.only_report_sdk_uses) {
- os << stats.api_counts[static_cast<unsigned>(hiddenapi::ApiList::kWhitelist)]
+ os << stats.api_counts[hiddenapi::ApiList::Whitelist().GetIntValue()]
<< " SDK API uses." << std::endl;
} else {
os << stats.count << " hidden API(s) used: "
<< stats.linking_count << " linked against, "
<< stats.reflection_count << " through reflection" << std::endl;
- os << kPrefix << stats.api_counts[static_cast<unsigned>(hiddenapi::ApiList::kBlacklist)]
+ os << kPrefix << stats.api_counts[hiddenapi::ApiList::Blacklist().GetIntValue()]
<< " in blacklist" << std::endl;
- os << kPrefix << stats.api_counts[static_cast<unsigned>(hiddenapi::ApiList::kDarkGreylist)]
+ os << kPrefix << stats.api_counts[hiddenapi::ApiList::GreylistMaxO().GetIntValue()]
<< " in dark greylist" << std::endl;
- os << kPrefix << stats.api_counts[static_cast<unsigned>(hiddenapi::ApiList::kLightGreylist)]
+ os << kPrefix << stats.api_counts[hiddenapi::ApiList::Greylist().GetIntValue()]
<< " in light greylist" << std::endl;
}
}